Compare commits


No commits in common. "master" and "v3.2-dev15" have entirely different histories.

446 changed files with 7591 additions and 14685 deletions

.github/matrix.py

@ -125,7 +125,7 @@ def main(ref_name):
# Ubuntu
if "haproxy-" in ref_name:
os = "ubuntu-24.04" # stable branch
os = "ubuntu-22.04" # stable branch
else:
os = "ubuntu-24.04" # development branch
@ -218,7 +218,6 @@ def main(ref_name):
"stock",
"OPENSSL_VERSION=1.0.2u",
"OPENSSL_VERSION=1.1.1s",
"OPENSSL_VERSION=3.5.1",
"QUICTLS=yes",
"WOLFSSL_VERSION=5.7.0",
"AWS_LC_VERSION=1.39.0",
@ -233,7 +232,8 @@ def main(ref_name):
for ssl in ssl_versions:
flags = ["USE_OPENSSL=1"]
skipdup=0
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
flags.append("USE_QUIC=1")
if "WOLFSSL" in ssl:
flags.append("USE_OPENSSL_WOLFSSL=1")
if "AWS_LC" in ssl:
@ -243,23 +243,8 @@ def main(ref_name):
flags.append("SSL_INC=${HOME}/opt/include")
if "LIBRESSL" in ssl and "latest" in ssl:
ssl = determine_latest_libressl(ssl)
skipdup=1
if "OPENSSL" in ssl and "latest" in ssl:
ssl = determine_latest_openssl(ssl)
skipdup=1
# if "latest" equals a version already in the list
if ssl in ssl_versions and skipdup == 1:
continue
openssl_supports_quic = False
try:
openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
except:
pass
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
flags.append("USE_QUIC=1")
matrix.append(
{
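
For context, the openssl_supports_quic block shown in this hunk is what lets plain OpenSSL builds enable QUIC from 3.5.0 onwards, while BoringSSL, QUICTLS, LibreSSL, wolfSSL and AWS-LC entries always get USE_QUIC=1. A condensed, runnable sketch of that decision follows, assuming the same "from packaging import version" import the helper relies on; the surrounding matrix-building code is omitted.

# Minimal sketch of the USE_QUIC decision shown in the matrix.py hunk above.
from packaging import version

def quic_flags(ssl):
    """Return the build flags implied by one SSL matrix entry."""
    flags = ["USE_OPENSSL=1"]

    # Plain OpenSSL only gained a usable QUIC API in 3.5.0; the version string
    # is parsed from entries such as "OPENSSL_VERSION=3.5.1".
    openssl_supports_quic = False
    try:
        openssl_supports_quic = \
            version.Version(ssl.split("OPENSSL_VERSION=", 1)[1]) >= version.Version("3.5.0")
    except (IndexError, version.InvalidVersion):
        pass  # not an OpenSSL entry, or a non-PEP 440 tag such as "1.1.1s"

    if (ssl in ("BORINGSSL=yes", "QUICTLS=yes")
            or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl
            or openssl_supports_quic):
        flags.append("USE_QUIC=1")
    return flags

print(quic_flags("OPENSSL_VERSION=3.5.1"))   # ['USE_OPENSSL=1', 'USE_QUIC=1']
print(quic_flags("OPENSSL_VERSION=1.1.1s"))  # ['USE_OPENSSL=1']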


@ -5,8 +5,82 @@ on:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
uses: ./.github/workflows/aws-lc-template.yml
with:
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
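
The release-lookup step above only relies on the helper printing an environment-style assignment: the printed value becomes part of the cache key and is passed through "env" to scripts/build-ssl.sh. A hypothetical sketch of such a helper follows; the real determine_latest_aws_lc_fips() lives in .github/matrix.py and may differ in details such as the variable name, the tag filtering and the error handling.

# Hypothetical sketch only -- the actual helper is implemented in .github/matrix.py.
import json
import urllib.request

def determine_latest_aws_lc_fips(ssl):
    """Return an env-style assignment naming the newest AWS-LC FIPS release tag.

    The 'ssl' argument mirrors the signature used by the workflow call and is
    unused in this sketch.
    """
    url = "https://api.github.com/repos/aws/aws-lc/releases?per_page=100"
    with urllib.request.urlopen(url) as resp:
        releases = json.loads(resp.read().decode("utf-8"))
    # Assumption: FIPS releases are tagged like "AWS-LC-FIPS-3.0.0".
    tags = [r["tag_name"] for r in releases if r["tag_name"].startswith("AWS-LC-FIPS-")]
    latest = max(tags, key=lambda t: [int(x) for x in t.rsplit("-", 1)[-1].split(".")])
    return "AWS_LC_FIPS_VERSION=" + latest.rsplit("-", 1)[-1]

if __name__ == "__main__":
    # The printed value feeds both the cache key and "env ... scripts/build-ssl.sh".
    print(determine_latest_aws_lc_fips(""))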


@ -1,103 +0,0 @@
name: AWS-LC template
on:
workflow_call:
inputs:
command:
required: true
type: string
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "${{ inputs.command }}")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1


@ -5,8 +5,82 @@ on:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
uses: ./.github/workflows/aws-lc-template.yml
with:
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi


@ -11,8 +11,13 @@ permissions:
jobs:
h2spec:
name: h2spec
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
include:
- TARGET: linux-glibc
CC: gcc
os: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install h2spec
@ -23,12 +28,12 @@ jobs:
tar xvf h2spec.tar.gz
sudo install -m755 h2spec /usr/local/bin/h2spec
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
- name: Compile HAProxy with gcc
- name: Compile HAProxy with ${{ matrix.CC }}
run: |
make -j$(nproc) all \
ERR=1 \
TARGET=linux-glibc \
CC=gcc \
TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
USE_OPENSSL=1
sudo make install


@ -38,7 +38,7 @@ jobs:
- name: Build with Coverity build tool
run: |
export PATH=`pwd`/coverity_tool/bin:$PATH
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
- name: Submit build result to Coverity Scan
run: |
tar czvf cov.tar.gz cov-int


@ -22,11 +22,11 @@ jobs:
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
- uses: actions/checkout@v4
- name: Install dependencies
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
- name: Install VTest
run: scripts/build-vtest.sh
- name: Build
run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
- name: Show version
run: ./haproxy -vv
- name: Show linked libraries
@ -37,10 +37,6 @@ jobs:
- name: Run VTest
id: vtest
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
@ -64,13 +60,3 @@ jobs:
cat $folder/LOG
echo "::endgroup::"
done
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1


@ -15,7 +15,6 @@ permissions:
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v4
- name: Install VTest


@ -11,7 +11,6 @@ permissions:
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v4
- name: Install VTest
@ -20,7 +19,7 @@ jobs:
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
sudo apt-get --no-install-recommends -y install socat gdb
- name: Install WolfSSL
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
- name: Compile HAProxy
@ -28,7 +27,7 @@ jobs:
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
ARCH_FLAGS="-ggdb3 -fsanitize=address"
sudo make install
@ -49,10 +48,6 @@ jobs:
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
@ -77,13 +72,3 @@ jobs:
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

CHANGELOG

@ -1,530 +1,6 @@
ChangeLog :
===========
2025/08/06 : 3.3-dev6
- MINOR: acme: implement traces
- BUG/MINOR: hlua: take default-path into account with lua-load-per-thread
- CLEANUP: counters: rename counters_be_shared_init to counters_be_shared_prepare
- MINOR: clock: make global_now_ms a pointer
- MINOR: clock: make global_now_ns a pointer as well
- MINOR: mux-quic: release conn after shutdown on BE reuse failure
- MINOR: session: strengthen connection attach to session
- MINOR: session: remove redundant target argument from session_add_conn()
- MINOR: session: strengthen idle conn limit check
- MINOR: session: do not release conn in session_check_idle_conn()
- MINOR: session: streamline session_check_idle_conn() usage
- MINOR: muxes: refactor private connection detach
- BUG/MEDIUM: mux-quic: ensure Early-data header is set
- BUILD: acme: avoid declaring TRACE_SOURCE in acme-t.h
- MINOR: acme: emit a log for DNS-01 challenge response
- MINOR: acme: emit the DNS-01 challenge details on the dpapi sink
- MEDIUM: acme: allow to wait and restart the task for DNS-01
- MINOR: acme: update the log for DNS-01
- BUG/MINOR: acme: possible integer underflow in acme_txt_record()
- BUG/MEDIUM: hlua_fcn: ensure systematic watcher cleanup for server list iterator
- MINOR: sample: Add le2dec (little endian to decimal) sample fetch
- BUILD: fcgi: fix the struct name of fcgi_flt_ctx
- BUILD: compat: provide relaxed versions of the MIN/MAX macros
- BUILD: quic: use _MAX() to avoid build issues in pools declarations
- BUILD: compat: always set _POSIX_VERSION to ease comparisons
- MINOR: implement ha_aligned_alloc() to return aligned memory areas
- MINOR: pools: support creating a pool from a pool registration
- MINOR: pools: add a new flag to declare static registrations
- MINOR: pools: force the name at creation time to be a const.
- MEDIUM: pools: change the static pool creation to pass a registration
- DEBUG: pools: store the pool registration file name and line number
- DEBUG: pools: also retrieve file and line for direct callers of create_pool()
- MEDIUM: pools: add an alignment property
- MINOR: pools: add macros to register aligned pools
- MINOR: pools: add macros to declare pools based on a struct type
- MEDIUM: pools: respect pool alignment in allocations
2025/07/28 : 3.3-dev5
- BUG/MEDIUM: queue/stats: also use stream_set_srv_target() for pendconns
- DOC: list missing global QUIC settings
2025/07/26 : 3.3-dev4
- CLEANUP: server: do not check for duplicates anymore in findserver()
- REORG: server: move findserver() from proxy.c to server.c
- MINOR: server: use the tree to look up the server name in findserver()
- CLEANUP: server: rename server_find_by_name() to server_find()
- CLEANUP: server: rename findserver() to server_find_by_name()
- CLEANUP: server: use server_find_by_name() where relevant
- CLEANUP: cfgparse: lookup proxy ID using existing functions
- CLEANUP: stream: lookup server ID using standard functions
- CLEANUP: server: simplify server_find_by_id()
- CLEANUP: server: add server_find_by_addr()
- CLEANUP: stream: use server_find_by_addr() in sticking_rule_find_target()
- CLEANUP: server: be sure never to compare src against a non-existing defsrv
- MEDIUM: proxy: take the defsrv out of the struct proxy
- MINOR: proxy: add checks for defsrv's validity
- MEDIUM: proxy: no longer allocate the default-server entry by default
- MEDIUM: proxy: register a post-section cleanup function
- MINOR: debug: report haproxy and operating system info in panic dumps
- BUG/MEDIUM: h3: do not overwrite interim with final response
- BUG/MINOR: h3: properly realloc buffer after interim response encoding
- BUG/MINOR: h3: ensure that invalid status code are not encoded (FE side)
- MINOR: qmux: change API for snd_buf FIN transmission
- BUG/MEDIUM: h3: handle interim response properly on FE side
- BUG/MINOR: h3: properly handle interim response on BE side
- BUG/MINOR: quic: Wrong source address use on FreeBSD
- MINOR: h3: remove unused outbuf in h3_resp_headers_send()
- BUG/MINOR: applet: Don't trigger BUG_ON if the tid is not on appctx init
- DEV: gdb: add a memprofile decoder to the debug tools
- MINOR: quic: Get rid of qc_is_listener()
- DOC: connection: explain the rules for idle/safe/avail connections
- BUG/MEDIUM: quic-be: CC buffer released from wrong pool
- BUG/MINOR: halog: exit with error when some output filters are set simultaneosly
- MINOR: cpu-topo: split cpu_dump_topology() to show its summary in show dev
- MINOR: cpu-topo: write thread-cpu bindings into trash buffer
- MINOR: debug: align output style of debug_parse_cli_show_dev with cpu_dump_topology
- MINOR: debug: add thread-cpu bindings info in 'show dev' output
- MINOR: quic: Remove pool_head_quic_be_cc_buf pool
- BUILD: debug: add missed guard USE_CPU_AFFINITY to show cpu bindings
- BUG/MEDIUM: threads: Disable the workaround to load libgcc_s on macOS
- BUG/MINOR: logs: fix log-steps extra log origins selection
- BUG/MINOR: hq-interop: fix FIN transmission
- MINOR: ssl: Add ciphers in ssl traces
- MINOR: ssl: Add curve id to curve name table and mapping functions
- MINOR: ssl: Add curves in ssl traces
- MINOR: ssl: Dump ciphers and sigalgs details in trace with 'advanced' verbosity
- MINOR: ssl: Remove ClientHello specific traces if !HAVE_SSL_CLIENT_HELLO_CB
- MINOR: h3: use smallbuf for request header emission
- MINOR: h3: add traces to h3_req_headers_send()
- BUG/MINOR: h3: fix uninitialized value in h3_req_headers_send()
- MINOR: log: explicitly ignore "log-steps" on backends
- BUG/MEDIUM: acme: use POST-as-GET instead of GET for resources
- BUG/MINOR mux-quic: apply correctly timeout on output pending data
- BUG/MINOR: mux-quic: ensure close-spread-time is properly applied
- MINOR: mux-quic: refactor timeout code
- MINOR: mux-quic: correctly implement backend timeout
- MINOR: mux-quic: disable glitch on backend side
- MINOR: mux-quic: store session in QCS instance
- MEDIUM: mux-quic: implement be connection reuse
- MINOR: mux-quic: do not reuse connection if app already shut
- MEDIUM: mux-quic: support backend private connection
- MINOR: acme: remove acme_req_auth() and use acme_post_as_get() instead
- BUG/MINOR: acme: allow "processing" in challenge requests
- CLEANUP: acme: fix wrong spelling of "resources"
- CLEANUP: ssl: Use only NIDs in curve name to id table
- MINOR: acme: add ACME to the haproxy -vv feature list
- BUG/MINOR: hlua: Skip headers when a receive is performed on an HTTP applet
- BUG/MEDIUM: applet: State inbuf is no longer full if input data are skipped
- BUG/MEDIUM: stconn: Fix conditions to know an applet can get data from stream
- BUG/MINOR: applet: Fix applet_getword() to not return one extra byte
- BUG/MEDIUM: Remove sync sends from streams to applets
- MINOR: applet: Add HTX versions for applet_input_data() and applet_output_room()
- MINOR: applet: Improve applet API to take care of inbuf/outbuf alloc failures
- MEDIUM: hlua: Update the tcp applet to use its own buffers
- MINOR: hlua: Fill the request array on the first HTTP applet run
- MINOR: hlua: Use the buffer instead of the HTTP message to get HTTP headers
- MEDIUM: hlua: Update the http applet to use its own buffers
- BUG/MEDIUM: hlua: Report to SC when data were consumed on a lua socket
- BUG/MEDIUM: hlua: Report to SC when output data are blocked on a lua socket
- MEDIUM: hlua: Update the socket applet to use its own buffers
- BUG/MEDIUM: dns: Reset reconnect tempo when connection is finally established
- MEDIUM: dns: Update the dns_session applet to use its own buffers
- CLEANUP: http-client: Remove useless indentation when sending request body
- MINOR: http-client: Try to send request body with headers if possible
- MINOR: http-client: Trigger an error if first response block isn't a start-line
- BUG/MINOR: httpclient-cli: Don't try to dump raw headers in HTX mode
- MINOR: httpclient-cli: Reset httpclient HTX buffer instead of removing blocks
- MEDIUM: http-client: Update the http-client applet to use its own buffers
- MEDIUM: log: Update the log applet to use its own buffers
- MEDIUM: sink: Update the sink applets to use their own buffers
- MEDIUM: peers: Update the peer applet to use its own buffers
- MEDIUM: promex: Update the promex applet to use their own buffers
- MINOR: applet: Add support for flags on applets with a flag about the new API
- MEDIUM: applet: Emit a warning when a legacy applet is spawned
- BUG/MEDIUM: logs: fix sess_build_logline_orig() recursion with options
- MEDIUM: stats: avoid 1 indirection by storing the shared stats directly in counters struct
- CLEANUP: compiler: prefer char * over void * for pointer arithmetic
- CLEANUP: include: replace hand-rolled offsetof to avoid UB
- CLEANUP: peers: remove unused peer_session_target()
- OPTIM: stats: store fast sharded counters pointers at session and stream level
2025/07/11 : 3.3-dev3
- BUG/MINOR: quic-be: Wrong retry_source_connection_id check
- MEDIUM: sink: change the sink mode type to PR_MODE_SYSLOG
- MEDIUM: server: move _srv_check_proxy_mode() checks from server init to finalize
- MINOR: server: move send-proxy* incompatibility check in _srv_check_proxy_mode()
- MINOR: mailers: warn if mailers are configured but not actually used
- BUG/MEDIUM: counters/server: fix server and proxy last_change mixup
- MEDIUM: server: add and use a separate last_change variable for internal use
- MEDIUM: proxy: add and use a separate last_change variable for internal use
- MINOR: counters: rename last_change counter to last_state_change
- MINOR: ssl: check TLS1.3 ciphersuites again in clienthello with recent AWS-LC
- BUG/MEDIUM: hlua: Forbid any L6/L7 sample fetche functions from lua services
- BUG/MEDIUM: mux-h2: Properly handle connection error during preface sending
- BUG/MINOR: jwt: Copy input and parameters in dedicated buffers in jwt_verify converter
- DOC: Fix 'jwt_verify' converter doc
- MINOR: jwt: Rename pkey to pubkey in jwt_cert_tree_entry struct
- MINOR: jwt: Remove unused parameter in convert_ecdsa_sig
- MAJOR: jwt: Allow certificate instead of public key in jwt_verify converter
- MINOR: ssl: Allow 'commit ssl cert' with no privkey
- MINOR: ssl: Prevent delete on certificate used by jwt_verify
- REGTESTS: jwt: Add test with actual certificate passed to jwt_verify
- REGTESTS: jwt: Test update of certificate used in jwt_verify
- DOC: 'jwt_verify' converter now supports certificates
- REGTESTS: restrict execution to a single thread group
- MINOR: ssl: Introduce new smp_client_hello_parse() function
- MEDIUM: stats: add persistent state to typed output format
- BUG/MINOR: httpclient: wrongly named httpproxy flag
- MINOR: ssl/ocsp: stop using the flags from the httpclient CLI
- MEDIUM: httpclient: split the CLI from the actual httpclient API
- MEDIUM: httpclient: implement a way to use directly htx data
- MINOR: httpclient/cli: add --htx option
- BUILD: dev/phash: remove the accidentally committed a.out file
- BUG/MINOR: ssl: crash in ssl_sock_io_cb() with SSL traces and idle connections
- BUILD/MEDIUM: deviceatlas: fix when installed in custom locations.
- DOC: deviceatlas build clarifications
- BUG/MINOR: ssl/ocsp: fix definition discrepancies with ocsp_update_init()
- MINOR: proto-tcp: Add support for TCP MD5 signature for listeners and servers
- BUILD: cfgparse-tcp: Add _GNU_SOURCE for TCP_MD5SIG_MAXKEYLEN
- BUG/MINOR: proto-tcp: Take care to initialized tcp_md5sig structure
- BUG/MINOR: http-act: Fix parsing of the expression argument for pause action
- MEDIUM: httpclient: add a Content-Length when the payload is known
- CLEANUP: ssl: Rename ssl_trace-t.h to ssl_trace.h
- MINOR: pattern: add a counter of added/freed patterns
- CI: set DEBUG_STRICT=2 for coverity scan
- CI: enable USE_QUIC=1 for OpenSSL versions >= 3.5.0
- CI: github: add an OpenSSL 3.5.0 job
- CI: github: update the stable CI to ubuntu-24.04
- BUG/MEDIUM: quic: SSL/TCP handshake failures with OpenSSL 3.5
- CI: github: update to OpenSSL 3.5.1
- BUG/MINOR: quic: Missing TLS 1.3 QUIC cipher suites and groups inits (OpenSSL 3.5 QUIC API)
- BUG/MINOR: quic-be: Malformed coalesced Initial packets
- MINOR: quic: Prevent QUIC backend use with the OpenSSL QUIC compatibility module (USE_OPENSS_COMPAT)
- MINOR: reg-tests: first QUIC+H3 reg tests (QUIC address validation)
- MINOR: quic-be: Set the backend alpn if not set by conf
- MINOR: quic-be: TLS version restriction to 1.3
- MINOR: cfgparse: enforce QUIC MUX compat on server line
- MINOR: server: support QUIC for dynamic servers
- CI: github: skip a ssl library version when latest is already in the list
- MEDIUM: resolvers: switch dns-accept-family to "auto" by default
- BUG/MINOR: resolvers: don't lower the case of binary DNS format
- MINOR: resolvers: do not duplicate the hostname_dn field
- MINOR: proto-tcp: Register a feature to report TCP MD5 signature support
- BUG/MINOR: listener: really assign distinct IDs to shards
- MINOR: quic: Prevent QUIC build with OpenSSL 3.5 new QUIC API version < 3.5.1
- BUG/MEDIUM: quic: Crash after QUIC server callbacks restoration (OpenSSL 3.5)
- REGTESTS: use two haproxy instances to distinguish the QUIC traces
- BUG/MEDIUM: http-client: Don't wake http-client applet if nothing was xferred
- BUG/MEDIUM: http-client: Properly inc input data when HTX blocks are xferred
- BUG/MEDIUM: http-client: Ask for more room when request data cannot be xferred
- BUG/MEDIUM: http-client: Test HTX_FL_EOM flag before commiting the HTX buffer
- BUG/MINOR: http-client: Ignore 1XX interim responses in non-HTX mode
- BUG/MINOR: http-client: Reject any 101-switching-protocols response
- BUG/MEDIUM: http-client: Drain the request if an early response is received
- BUG/MEDIUM: http-client: Notify applet has more data to deliver until the EOM
- BUG/MINOR: h3: fix https scheme request encoding for BE side
- MINOR: h1-htx: Add function to format an HTX message in its H1 representation
- BUG/MINOR: mux-h1: Use configured error files if possible for early H1 errors
- BUG/MINOR: h1-htx: Don't forget to init flags in h1_format_htx_msg function
- CLEANUP: assorted typo fixes in the code, commits and doc
- BUILD: adjust scripts/build-ssl.sh to modern CMake system of QuicTLS
- MINOR: debug: add distro name and version in postmortem
2025/06/26 : 3.3-dev2
- BUG/MINOR: config/server: reject QUIC addresses
- MINOR: server: implement helper to identify QUIC servers
- MINOR: server: mark QUIC support as experimental
- MINOR: mux-quic-be: allow QUIC proto on backend side
- MINOR: quic-be: Correct Version Information transp. param encoding
- MINOR: quic-be: Version Information transport parameter check
- MINOR: quic-be: Call ->prepare_srv() callback at parsing time
- MINOR: quic-be: QUIC backend XPRT and transport parameters init during parsing
- MINOR: quic-be: QUIC server xprt already set when preparing their CTXs
- MINOR: quic-be: Add a function for the TLS context allocations
- MINOR: quic-be: Correct the QUIC protocol lookup
- MINOR: quic-be: ssl_sock contexts allocation and misc adaptations
- MINOR: quic-be: SSL sessions initializations
- MINOR: quic-be: Add a function to initialize the QUIC client transport parameters
- MINOR: sock: Add protocol and socket types parameters to sock_create_server_socket()
- MINOR: quic-be: ->connect() protocol callback adaptations
- MINOR: quic-be: QUIC connection allocation adaptation (qc_new_conn())
- MINOR: quic-be: xprt ->init() adapatations
- MINOR: quic-be: add field for max_udp_payload_size into quic_conn
- MINOR: quic-be: Do not redispatch the datagrams
- MINOR: quic-be: Datagrams and packet parsing support
- MINOR: quic-be: Handshake packet number space discarding
- MINOR: h3-be: Correctly retrieve h3 counters
- MINOR: quic-be: Store asap the DCID
- MINOR: quic-be: Build post handshake frames
- MINOR: quic-be: Add the conn object to the server SSL context
- MINOR: quic-be: Initial packet number space discarding.
- MINOR: quic-be: I/O handler switch adaptation
- MINOR: quic-be: Store the remote transport parameters asap
- MINOR: quic-be: Missing callbacks initializations (USE_QUIC_OPENSSL_COMPAT)
- MINOR: quic-be: Make the secret derivation works for QUIC backends (USE_QUIC_OPENSSL_COMPAT)
- MINOR: quic-be: SSL_get_peer_quic_transport_params() not defined by OpenSSL 3.5 QUIC API
- MINOR: quic-be: get rid of ->li quic_conn member
- MINOR: quic-be: Prevent the MUX to send/receive data
- MINOR: quic: define proper proto on QUIC servers
- MEDIUM: quic-be: initialize MUX on handshake completion
- BUG/MINOR: hlua: Don't forget the return statement after a hlua_yieldk()
- BUILD: hlua: Fix warnings about uninitialized variables
- BUILD: listener: fix 'for' loop inline variable declaration
- BUILD: hlua: Fix warnings about uninitialized variables (2)
- BUG/MEDIUM: mux-quic: adjust wakeup behavior
- MEDIUM: backend: delay MUX init with ALPN even if proto is forced
- MINOR: quic: mark ctrl layer as ready on quic_connect_server()
- MINOR: mux-quic: improve documentation for snd/rcv app-ops
- MINOR: mux-quic: define flag for backend side
- MINOR: mux-quic: set expect data only on frontend side
- MINOR: mux-quic: instantiate first stream on backend side
- MINOR: quic: wakeup backend MUX on handshake completed
- MINOR: hq-interop: decode response into HTX for backend side support
- MINOR: hq-interop: encode request from HTX for backend side support
- CLEANUP: quic-be: Add comments about qc_new_conn() usage
- BUG/MINOR: quic-be: CID double free upon qc_new_conn() failures
- MINOR: quic-be: Avoid SSL context unreachable code without USE_QUIC_OPENSSL_COMPAT
- BUG/MINOR: quic: prevent crash on startup with -dt
- MINOR: server: reject QUIC servers without explicit SSL
- BUG/MINOR: quic: work around NEW_TOKEN parsing error on backend side
- BUG/MINOR: http-ana: Properly handle keep-query redirect option if no QS
- BUG/MINOR: quic: don't restrict reception on backend privileged ports
- MINOR: hq-interop: handle HTX response forward if not enough space
- BUG/MINOR: quic: Fix OSSL_FUNC_SSL_QUIC_TLS_got_transport_params_fn callback (OpenSSL3.5)
- BUG/MINOR: quic: fix ODCID initialization on frontend side
- BUG/MEDIUM: cli: Don't consume data if outbuf is full or not available
- MINOR: cli: handle EOS/ERROR first
- BUG/MEDIUM: check: Set SOCKERR by default when a connection error is reported
- BUG/MINOR: mux-quic: check sc_attach_mux return value
- MINOR: h3: support basic HTX start-line conversion into HTTP/3 request
- MINOR: h3: encode request headers
- MINOR: h3: complete HTTP/3 request method encoding
- MINOR: h3: complete HTTP/3 request scheme encoding
- MINOR: h3: adjust path request encoding
- MINOR: h3: adjust auth request encoding or fallback to host
- MINOR: h3: prepare support for response parsing
- MINOR: h3: convert HTTP/3 response into HTX for backend side support
- MINOR: h3: complete response status transcoding
- MINOR: h3: transcode H3 response headers into HTX blocks
- MINOR: h3: use BUG_ON() on missing request start-line
- MINOR: h3: reject invalid :status in response
- DOC: config: prefer-last-server: add notes for non-deterministic algorithms
- CLEANUP: connection: remove unused mux-ops dedicated to QUIC
- BUG/MINOR: mux-quic/h3: properly handle too low peer fctl initial stream
- MINOR: mux-quic: support max bidi streams value set by the peer
- MINOR: mux-quic: abort conn if cannot create stream due to fctl
- MEDIUM: mux-quic: implement attach for new streams on backend side
- BUG/MAJOR: fwlc: Count an avoided server as unusable.
- MINOR: fwlc: Factorize code.
- BUG/MEDIUM: quic: do not release BE quic-conn prior to upper conn
- MAJOR: cfgparse: turn the same proxy name warning to an error
- MAJOR: cfgparse: make sure server names are unique within a backend
- BUG/MINOR: tools: only reset argument start upon new argument
- BUG/MINOR: stream: Avoid recursive evaluation for unique-id based on itself
- BUG/MINOR: log: Be able to use %ID alias at anytime of the stream's evaluation
- MINOR: hlua: emit a log instead of an alert for aborted actions due to unavailable yield
- MAJOR: mailers: remove native mailers support
- BUG/MEDIUM: ssl/clienthello: ECDSA with ssl-max-ver TLSv1.2 and no ECDSA ciphers
- DOC: configuration: add details on prefer-client-ciphers
- MINOR: ssl: Add "renegotiate" server option
- DOC: remove the program section from the documentation
- MAJOR: mworker: remove program section support
- BUG/MINOR: quic: wrong QUIC_FT_CONNECTION_CLOSE(0x1c) frame encoding
- MINOR: quic-be: add a "CC connection" backend TX buffer pool
- MINOR: quic: Useless TX buffer size reduction in closing state
- MINOR: quic-be: Allow sending 1200 bytes Initial datagrams
- MINOR: quic-be: address validation support implementation (RETRY)
- MEDIUM: proxy: deprecate the "transparent" and "option transparent" directives
- REGTESTS: update http_reuse_be_transparent with "transparent" deprecated
- REGTESTS: script: also add a line pointing to the log file
- DOC: config: explain how to deal with "transparent" deprecation
- MEDIUM: proxy: mark the "dispatch" directive as deprecated
- DOC: config: crt-list clarify default cert + cert-bundle
- MEDIUM: cpu-topo: switch to the "performance" cpu-policy by default
- SCRIPTS: drop the HTML generation from announce-release
- BUG/MINOR: tools: use my_unsetenv instead of unsetenv
- CLEANUP: startup: move comment about nbthread where it's more appropriate
- BUILD: qpack: fix a build issue on older compilers
2025/06/11 : 3.3-dev1
- BUILD: tools: properly define ha_dump_backtrace() to avoid a build warning
- DOC: config: Fix a typo in 2.7 (Name format for maps and ACLs)
- REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (5)
- REGTESTS: Remove REQUIRE_VERSION=2.3 from all tests
- REGTESTS: Remove REQUIRE_VERSION=2.4 from all tests
- REGTESTS: Remove tests with REQUIRE_VERSION_BELOW=2.4
- REGTESTS: Remove support for REQUIRE_VERSION and REQUIRE_VERSION_BELOW
- MINOR: server: group postinit server tasks under _srv_postparse()
- MINOR: stats: add stat_col flags
- MINOR: stats: add ME_NEW_COMMON() helper
- MINOR: proxy: collect per-capability stat in proxy_cond_disable()
- MINOR: proxy: add a true list containing all proxies
- MINOR: log: only run postcheck_log_backend() checks on backend
- MEDIUM: proxy: use global proxy list for REGISTER_POST_PROXY_CHECK() hook
- MEDIUM: server: automatically add server to proxy list in new_server()
- MEDIUM: server: add and use srv_init() function
- BUG/MAJOR: leastconn: Protect tree_elt with the lbprm lock
- BUG/MEDIUM: check: Requeue healthchecks on I/O events to handle check timeout
- CLEANUP: applet: Update comment for applet_put* functions
- DEBUG: check: Add the healthcheck's expiration date in the trace messags
- BUG/MINOR: mux-spop: Fix null-pointer deref on SPOP stream allocation failure
- CLEANUP: sink: remove useless cleanup in sink_new_from_logger()
- MAJOR: counters: add shared counters base infrastructure
- MINOR: counters: add shared counters helpers to get and drop shared pointers
- MINOR: counters: add common struct and flags to {fe,be}_counters_shared
- MEDIUM: counters: manage shared counters using dedicated helpers
- CLEANUP: counters: merge some common counters between {fe,be}_counters_shared
- MINOR: counters: add local-only internal rates to compute some maxes
- MAJOR: counters: dispatch counters over thread groups
- BUG/MEDIUM: cli: Properly parse empty lines and avoid crashed
- BUG/MINOR: config: emit warning for empty args only in discovery mode
- BUG/MINOR: config: fix arg number reported on empty arg warning
- BUG/MINOR: quic: Missing SSL session object freeing
- MINOR: applet: Add API functions to manipulate input and output buffers
- MINOR: applet: Add API functions to get data from the input buffer
- CLEANUP: applet: Simplify a bit comments for applet_put* functions
- MEDIUM: hlua: Update TCP applet functions to use the new applet API
- BUG/MEDIUM: fd: Use the provided tgid in fd_insert() to get tgroup_info
- BUG/MINIR: h1: Fix doc of 'accept-unsafe-...-request' about URI parsing
2025/05/28 : 3.3-dev0
- MINOR: version: mention that it's development again
2025/05/28 : 3.2.0
- MINOR: promex: Add agent check status/code/duration metrics
- MINOR: ssl: support strict-sni in ssl-default-bind-options
- MINOR: ssl: also provide the "tls-tickets" bind option
- MINOR: server: define CLI I/O handler for "add server"
- MINOR: server: implement "add server help"
- MINOR: server: use stress mode for "add server help"
- BUG/MEDIUM: server: fix crash after duplicate GUID insertion
- BUG/MEDIUM: server: fix potential null-deref after previous fix
- MINOR: config: list recently added sections with -dKcfg
- BUG/MAJOR: cache: Crash because of wrong cache entry deleted
- DOC: configuration: fix the example in crt-store
- DOC: config: clarify the wording around single/double quotes
- DOC: config: clarify the legacy cookie and header captures
- DOC: config: fix alphabetical ordering of layer 7 sample fetch functions
- DOC: config: fix alphabetical ordering of layer 6 sample fetch functions
- DOC: config: fix alphabetical ordering of layer 5 sample fetch functions
- DOC: config: fix alphabetical ordering of layer 4 sample fetch functions
- DOC: config: fix alphabetical ordering of internal sample fetch functions
- BUG/MINOR: h3: Set HTX flags corresponding to the scheme found in the request
- BUG/MEDIUM: h3: Declare absolute URI as normalized when a :authority is found
- DOC: config: mention in bytes_in and bytes_out that they're read on input
- DOC: config: clarify the basics of ACLs (call point, multi-valued etc)
- REGTESTS: Make the script testing conditional set-var compatible with Vtest2
- REGTESTS: Explicitly allow failing shell commands in some scripts
- MINOR: listeners: Add support for a label on bind line
- BUG/MEDIUM: cli/ring: Properly handle shutdown in "show event" I/O handler
- BUG/MEDIUM: hlua: Properly detect shudowns for TCP applets based on the new API
- BUG/MEDIUM: hlua: Fix getline() for TCP applets to work with applet's buffers
- BUG/MEDIUM: hlua: Fix receive API for TCP applets to properly handle shutdowns
- CI: vtest: Rely on VTest2 to run regression tests
- CI: vtest: Fix the build script to properly work on MaOS
- CI: combine AWS-LC and AWS-LC-FIPS by template
- BUG/MEDIUM: httpclient: Throw an error if an lua httpclient instance is reused
- DOC: hlua: Add a note to warn user about httpclient object reuse
- DOC: hlua: fix a few typos in HTTPMessage.set_body_len() documentation
- DEV: patchbot: prepare for new version 3.3-dev
- MINOR: version: mention that it's 3.2 LTS now.
2025/05/21 : 3.2-dev17
- DOC: configuration: explicit multi-choice on bind shards option
- BUG/MINOR: sink: detect and warn when using "send-proxy" options with ring servers
- BUG/MEDIUM: peers: also limit the number of incoming updates
- MEDIUM: hlua: Add function to change the body length of an HTTP Message
- BUG/MEDIUM: stconn: Disable 0-copy forwarding for filters altering the payload
- BUG/MINOR: h3: don't insert more than one Host header
- BUG/MEDIUM: h1/h2/h3: reject forbidden chars in the Host header field
- DOC: config: properly index "table and "stick-table" in their section
- DOC: management: change reference to configuration manual
- BUILD: debug: mark ha_crash_now() as attribute(noreturn)
- IMPORT: slz: avoid multiple shifts on 64-bits
- IMPORT: slz: support crc32c for lookup hash on sse4 but only if requested
- IMPORT: slz: use a better hash for machines with a fast multiply
- IMPORT: slz: fix header used for empty zlib message
- IMPORT: slz: silence a build warning on non-x86 non-arm
- BUG/MAJOR: leastconn: do not loop forever when facing saturated servers
- BUG/MAJOR: queue: properly keep count of the queue length
- BUG/MINOR: quic: fix crash on quic_conn alloc failure
- BUG/MAJOR: leastconn: never reuse the node after dropping the lock
- MINOR: acme: renewal notification over the dpapi sink
- CLEANUP: quic: Useless BIO_METHOD initialization
- MINOR: quic: Add useful error traces about qc_ssl_sess_init() failures
- MINOR: quic: Allow the use of the new OpenSSL 3.5.0 QUIC TLS API (to be completed)
- MINOR: quic: implement all remaining callbacks for OpenSSL 3.5 QUIC API
- MINOR: quic: OpenSSL 3.5 internal QUIC custom extension for transport parameters reset
- MINOR: quic: OpenSSL 3.5 trick to support 0-RTT
- DOC: update INSTALL for QUIC with OpenSSL 3.5 usages
- DOC: management: update 'acme status'
- BUG/MEDIUM: wdt: always ignore the first watchdog wakeup
- CLEANUP: wdt: clarify the comments on the common exit path
- BUILD: ssl: avoid possible printf format warning in traces
- BUILD: acme: fix build issue on 32-bit archs with 64-bit time_t
- DOC: management: precise some of the fields of "show servers conn"
- BUG/MEDIUM: mux-quic: fix BUG_ON() on rxbuf alloc error
- DOC: watchdog: update the doc to reflect the recent changes
- BUG/MEDIUM: acme: check if acme domains are configured
- BUG/MINOR: acme: fix formatting issue in error and logs
- EXAMPLES: lua: avoid screen refresh effect in "trisdemo"
- CLEANUP: quic: remove unused cbuf module
- MINOR: quic: move function to check stream type in utils
- MINOR: quic: refactor handling of streams after MUX release
- MINOR: quic: add some missing includes
- MINOR: quic: adjust quic_conn-t.h include list
- CLEANUP: cfgparse: alphabetically sort the global keywords
- MINOR: glitches: add global setting "tune.glitches.kill.cpu-usage"
2025/05/14 : 3.2-dev16
- BUG/MEDIUM: mux-quic: fix crash on invalid fctl frame dereference
- DEBUG: pool: permit per-pool UAF configuration
- MINOR: acme: add the global option 'acme.scheduler'
- DEBUG: pools: add a new integrity mode "backup" to copy the released area
- MEDIUM: sock-inet: re-check IPv6 connectivity every 30s
- BUG/MINOR: ssl: doesn't fill conf->crt with first arg
- BUG/MINOR: ssl: prevent multiple 'crt' on the same ssl-f-use line
- BUG/MINOR: ssl/ckch: always free() the previous entry during parsing
- MINOR: tools: ha_freearray() frees an array of string
- BUG/MINOR: ssl/ckch: always ha_freearray() the previous entry during parsing
- MINOR: ssl/ckch: warn when the same keyword was used twice
- BUG/MINOR: threads: fix soft-stop without multithreading support
- BUG/MINOR: tools: improve parse_line()'s robustness against empty args
- BUG/MINOR: cfgparse: improve the empty arg position report's robustness
- BUG/MINOR: server: dont depend on proxy for server cleanup in srv_drop()
- BUG/MINOR: server: perform lbprm deinit for dynamic servers
- MINOR: http: add a function to validate characters of :authority
- BUG/MEDIUM: h2/h3: reject some forbidden chars in :authority before reassembly
- MINOR: quic: account Tx data per stream
- MINOR: mux-quic: account Rx data per stream
- MINOR: quic: add stream format for "show quic"
- MINOR: quic: display QCS info on "show quic stream"
- MINOR: quic: display stream age
- BUG/MINOR: cpu-topo: fix group-by-cluster policy for disordered clusters
- MINOR: cpu-topo: add a new "group-by-ccx" CPU policy
- MINOR: cpu-topo: provide a function to sort clusters by average capacity
- MEDIUM: cpu-topo: change "performance" to consider per-core capacity
- MEDIUM: cpu-topo: change "efficiency" to consider per-core capacity
- MEDIUM: cpu-topo: prefer grouping by CCX for "performance" and "efficiency"
- MEDIUM: config: change default limits to 1024 threads and 32 groups
- BUG/MINOR: hlua: Fix Channel:data() and Channel:line() to respect documentation
- DOC: config: Fix a typo in the "term_events" definition
- BUG/MINOR: spoe: Don't report error on applet release if filter is in DONE state
- BUG/MINOR: mux-spop: Don't report error for stream if ACK was already received
- BUG/MINOR: mux-spop: Make the demux stream ID a signed integer
- BUG/MINOR: mux-spop: Don't open new streams for SPOP connection on error
- MINOR: mux-spop: Don't set SPOP connection state to FRAME_H after ACK parsing
- BUG/MEDIUM: mux-spop: Remove frame parsing states from the SPOP connection state
- BUG/MEDIUM: mux-spop: Properly handle CLOSING state
- BUG/MEDIUM: spop-conn: Report short read for partial frames payload
- BUG/MEDIUM: mux-spop: Properly detect truncated frames on demux to report error
- BUG/MEDIUM: mux-spop; Don't report a read error if there are pending data
- DEBUG: mux-spop: Review some trace messages to adjust the message or the level
- DOC: config: move address formats definition to section 2
- DOC: config: move stick-tables and peers to their own section
- DOC: config: move the extraneous sections out of the "global" definition
- CI: AWS-LC(fips): enable unit tests
- CI: AWS-LC: enable unit tests
- CI: compliance: limit run on forks only to manual + cleanup
- CI: musl: enable unit tests
- CI: QuicTLS (weekly): limit run on forks only to manual dispatch
- CI: WolfSSL: enable unit tests
2025/05/09 : 3.2-dev15
- BUG/MEDIUM: stktable: fix sc_*(<ctr>) BUG_ON() regression with ctx > 9
- BUG/MINOR: acme/cli: don't output error on success

INSTALL

@ -237,7 +237,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
-----------------
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
supports the OpenSSL library, and is known to build and work with branches
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.5. It is recommended to use
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.4. It is recommended to use
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
and each of the branches above receives its own fixes, without forcing you to
@ -259,10 +259,10 @@ reported to work as well. While there are some efforts from the community to
ensure they work well, OpenSSL remains the primary target and this means that
in case of conflicting choices, OpenSSL support will be favored over other
options. Note that QUIC is not fully supported when haproxy is built with
OpenSSL < 3.5 version. In this case, QUICTLS is the preferred alternative.
As of writing this, the QuicTLS project follows OpenSSL very closely and provides
update simultaneously, but being a volunteer-driven project, its long-term future
does not look certain enough to convince operating systems to package it, so it
OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
this, the QuicTLS project follows OpenSSL very closely and provides update
simultaneously, but being a volunteer-driven project, its long-term future does
not look certain enough to convince operating systems to package it, so it
needs to be build locally. See the section about QUIC in this document.
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
@ -500,11 +500,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
protocol stack is currently supported as an experimental feature in haproxy on
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
Note that QUIC is not always fully supported by the OpenSSL library depending on
its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before
3.5 contrary to others libraries with full QUIC support. The preferred option is
to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its
repository is available at this location:
Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
cannot be supported by OpenSSL contrary to others libraries with full QUIC
support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
a QUIC-compatible API. Its repository is available at this location:
https://github.com/quictls/openssl
@ -532,18 +531,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
As last resort, haproxy may be compiled against OpenSSL as follows from 3.5
version with 0-RTT support:
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
or as follows for all OpenSSL versions but without O-RTT support:
As last resort, haproxy may be compiled against OpenSSL as follows:
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
In addition to this requirements, the QUIC listener bindings must be explicitly
enabled with a specific QUIC tuning parameter. (see "limited-quic" global
parameter of haproxy Configuration Manual).
Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
OpenSSL. In addition to this compilation requirements, the QUIC listener
bindings must be explicitly enabled with a specific QUIC tuning parameter.
(see "limited-quic" global parameter of haproxy Configuration Manual).
5) How to build HAProxy


@ -660,7 +660,7 @@ OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
src/quic_enc.o
src/cbuf.o src/quic_enc.o
endif
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
@ -982,9 +982,9 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
src/lb_map.o src/shctx.o src/hpack-dec.o \
src/lb_map.o src/shctx.o src/mworker-prog.o src/hpack-dec.o \
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o \
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
@ -992,7 +992,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
src/httpclient_cli.o src/version.o
src/version.o
ifneq ($(TRACE),)
OBJS += src/calltrace.o


@ -1,2 +1,2 @@
$Format:%ci$
2025/08/06
2025/05/09


@ -1 +1 @@
3.3-dev6
3.2-dev15


@ -5,8 +5,7 @@ CXX := c++
CXXLIB := -lstdc++
ifeq ($(DEVICEATLAS_SRC),)
OPTIONS_CFLAGS += -I$(DEVICEATLAS_INC)
OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
OPTIONS_LDFLAGS += -lda
else
DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)


@ -389,9 +389,6 @@ listed below. Metrics from extra counters are not listed.
| haproxy_server_max_connect_time_seconds |
| haproxy_server_max_response_time_seconds |
| haproxy_server_max_total_time_seconds |
| haproxy_server_agent_status |
| haproxy_server_agent_code |
| haproxy_server_agent_duration_seconds |
| haproxy_server_internal_errors_total |
| haproxy_server_unsafe_idle_connections_current |
| haproxy_server_safe_idle_connections_current |

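The agent-check series that differ between the two branches (haproxy_server_agent_status, haproxy_server_agent_code, haproxy_server_agent_duration_seconds) are only present on builds whose exporter provides them. As a quick consumer-side check, the sketch below scrapes the exporter and prints any agent metrics it finds; the URL is an assumption for a frontend running "http-request use-service prometheus-exporter" on port 8405, so adjust it to your setup.

# Illustrative check for the agent-check metrics listed above; the URL is an assumption.
import urllib.request

PROMEX_URL = "http://127.0.0.1:8405/metrics"

with urllib.request.urlopen(PROMEX_URL) as resp:
    for line in resp.read().decode("utf-8").splitlines():
        if line.startswith("haproxy_server_agent_"):
            print(line)
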

@ -32,7 +32,7 @@
/* Prometheus exporter flags (ctx->flags) */
#define PROMEX_FL_METRIC_HDR 0x00000001
#define PROMEX_FL_BODYLESS_RESP 0x00000002
/* unused: 0x00000002 */
/* unused: 0x00000004 */
/* unused: 0x00000008 */
/* unused: 0x00000010 */


@ -173,8 +173,6 @@ const struct ist promex_st_metric_desc[ST_I_PX_MAX] = {
[ST_I_PX_CTIME] = IST("Avg. connect time for last 1024 successful connections."),
[ST_I_PX_RTIME] = IST("Avg. response time for last 1024 successful connections."),
[ST_I_PX_TTIME] = IST("Avg. total time for last 1024 successful connections."),
[ST_I_PX_AGENT_STATUS] = IST("Status of last agent check, per state label value."),
[ST_I_PX_AGENT_DURATION] = IST("Total duration of the latest server agent check, in seconds."),
[ST_I_PX_QT_MAX] = IST("Maximum observed time spent in the queue"),
[ST_I_PX_CT_MAX] = IST("Maximum observed time spent waiting for a connection to complete"),
[ST_I_PX_RT_MAX] = IST("Maximum observed time spent waiting for a server response"),
@ -427,8 +425,9 @@ static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
static struct ist prefix = IST("haproxy_process_");
struct promex_ctx *ctx = appctx->svcctx;
struct field val;
struct channel *chn = sc_ic(appctx_sc(appctx));
struct ist name, desc, out = ist2(trash.area, 0);
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
int ret = 1;
if (!stats_fill_info(stat_line_info, ST_I_INF_MAX, 0))
@ -494,6 +493,7 @@ static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
channel_add_input(chn, out.len);
}
return ret;
full:
@ -510,8 +510,9 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
struct proxy *px = ctx->p[0];
struct stats_module *mod = ctx->p[1];
struct field val;
struct channel *chn = sc_ic(appctx_sc(appctx));
struct ist name, desc, out = ist2(trash.area, 0);
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
int ret = 1;
enum promex_front_state state;
@ -691,6 +692,7 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
channel_add_input(chn, out.len);
}
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
@ -712,8 +714,9 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
struct listener *li = ctx->p[1];
struct stats_module *mod = ctx->p[2];
struct field val;
struct channel *chn = sc_ic(appctx_sc(appctx));
struct ist name, desc, out = ist2(trash.area, 0);
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
int ret = 1;
enum li_status status;
@ -894,6 +897,7 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
channel_add_input(chn, out.len);
}
/* Save pointers (0=current proxy, 1=current listener, 2=current stats module) of the current context */
ctx->p[0] = px;
@ -915,8 +919,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
struct stats_module *mod = ctx->p[1];
struct server *sv;
struct field val;
struct channel *chn = sc_ic(appctx_sc(appctx));
struct ist name, desc, out = ist2(trash.area, 0);
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
int ret = 1;
double secs;
@ -1178,6 +1183,7 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
channel_add_input(chn, out.len);
}
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
ctx->p[0] = px;
@ -1198,8 +1204,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
struct server *sv = ctx->p[1];
struct stats_module *mod = ctx->p[2];
struct field val;
struct channel *chn = sc_ic(appctx_sc(appctx));
struct ist name, desc, out = ist2(trash.area, 0);
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
int ret = 1;
double secs;
@ -1335,7 +1342,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
secs = (double)sv->check.duration / 1000.0;
val = mkf_flt(FN_DURATION, secs);
break;
case ST_I_PX_REQ_TOT:
if (px->mode != PR_MODE_HTTP) {
sv = NULL;
@ -1358,36 +1364,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
labels[lb_idx+1].value = promex_hrsp_code[ctx->field_num - ST_I_PX_HRSP_1XX];
break;
case ST_I_PX_AGENT_STATUS:
if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
goto next_sv;
for (; ctx->obj_state < HCHK_STATUS_SIZE; ctx->obj_state++) {
if (get_check_status_result(ctx->obj_state) < CHK_RES_FAILED)
continue;
val = mkf_u32(FO_STATUS, sv->agent.status == ctx->obj_state);
check_state = get_check_status_info(ctx->obj_state);
labels[lb_idx+1].name = ist("state");
labels[lb_idx+1].value = ist(check_state);
if (!promex_dump_ts(appctx, prefix, name, desc,
type,
&val, labels, &out, max))
goto full;
}
ctx->obj_state = 0;
goto next_sv;
case ST_I_PX_AGENT_CODE:
if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
goto next_sv;
val = mkf_u32(FN_OUTPUT, (sv->agent.status < HCHK_STATUS_L57DATA) ? 0 : sv->agent.code);
break;
case ST_I_PX_AGENT_DURATION:
if (sv->agent.status < HCHK_STATUS_CHECKED)
goto next_sv;
secs = (double)sv->agent.duration / 1000.0;
val = mkf_flt(FN_DURATION, secs);
break;
default:
break;
}
@ -1498,6 +1474,7 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
channel_add_input(chn, out.len);
}
/* Decrement server refcount if it was saved through ctx.p[1]. */
@ -1593,8 +1570,9 @@ static int promex_dump_ref_modules_metrics(struct appctx *appctx, struct htx *ht
{
struct promex_ctx *ctx = appctx->svcctx;
struct promex_module_ref *ref = ctx->p[0];
struct channel *chn = sc_ic(appctx_sc(appctx));
struct ist out = ist2(trash.area, 0);
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
int ret = 1;
if (!ref) {
@ -1618,6 +1596,7 @@ static int promex_dump_ref_modules_metrics(struct appctx *appctx, struct htx *ht
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
channel_add_input(chn, out.len);
}
ctx->p[0] = ref;
return ret;
@ -1632,8 +1611,9 @@ static int promex_dump_all_modules_metrics(struct appctx *appctx, struct htx *ht
{
struct promex_ctx *ctx = appctx->svcctx;
struct promex_module *mod = ctx->p[0];
struct channel *chn = sc_ic(appctx_sc(appctx));
struct ist out = ist2(trash.area, 0);
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
int ret = 1;
if (!mod) {
@ -1657,6 +1637,7 @@ static int promex_dump_all_modules_metrics(struct appctx *appctx, struct htx *ht
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
channel_add_input(chn, out.len);
}
ctx->p[0] = mod;
return ret;
@ -1671,7 +1652,7 @@ static int promex_dump_all_modules_metrics(struct appctx *appctx, struct htx *ht
* Uses <appctx.ctx.stats.px> as a pointer to the current proxy and <sv>/<li>
* as pointers to the current server/listener respectively.
*/
static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct htx *htx)
{
struct promex_ctx *ctx = appctx->svcctx;
int ret;
@ -1795,7 +1776,7 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
return 1;
full:
applet_have_more_data(appctx);
sc_need_room(sc, channel_htx_recv_max(sc_ic(appctx_sc(appctx)), htx) + 1);
return 0;
error:
/* unrecoverable error */
@ -1808,11 +1789,12 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
/* Parse the query string of request URI to filter the metrics. It returns 1 on
* success and -1 on error. */
static int promex_parse_uri(struct appctx *appctx)
static int promex_parse_uri(struct appctx *appctx, struct stconn *sc)
{
struct promex_ctx *ctx = appctx->svcctx;
struct buffer *outbuf;
struct htx *req_htx;
struct channel *req = sc_oc(sc);
struct channel *res = sc_ic(sc);
struct htx *req_htx, *res_htx;
struct htx_sl *sl;
char *p, *key, *value;
const char *end;
@ -1822,13 +1804,10 @@ static int promex_parse_uri(struct appctx *appctx)
int len;
/* Get the query-string */
req_htx = htxbuf(DISGUISE(applet_get_inbuf(appctx)));
req_htx = htxbuf(&req->buf);
sl = http_get_stline(req_htx);
if (!sl)
goto bad_req_error;
if (sl->info.req.meth == HTTP_METH_HEAD)
ctx->flags |= PROMEX_FL_BODYLESS_RESP;
goto error;
p = http_find_param_list(HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl), '?');
if (!p)
goto end;
@ -1861,27 +1840,27 @@ static int promex_parse_uri(struct appctx *appctx)
*p = 0;
len = url_decode(key, 1);
if (len == -1)
goto bad_req_error;
goto error;
/* decode value */
if (value) {
while (p < end && *p != '=' && *p != '&' && *p != '#')
++p;
if (*p == '=')
goto bad_req_error;
goto error;
if (*p == '&')
*(p++) = 0;
else if (*p == '#')
*p = 0;
len = url_decode(value, 1);
if (len == -1)
goto bad_req_error;
goto error;
}
if (strcmp(key, "scope") == 0) {
default_scopes = 0; /* at least a scope defined, unset default scopes */
if (!value)
goto bad_req_error;
goto error;
else if (*value == 0)
ctx->flags &= ~PROMEX_FL_SCOPE_ALL;
else if (*value == '*' && *(value+1) == 0)
@ -1912,14 +1891,14 @@ static int promex_parse_uri(struct appctx *appctx)
}
}
if (!(ctx->flags & PROMEX_FL_SCOPE_MODULE))
goto bad_req_error;
goto error;
}
}
else if (strcmp(key, "metrics") == 0) {
struct ist args;
if (!value)
goto bad_req_error;
goto error;
for (args = ist(value); istlen(args); args = istadv(istfind(args, ','), 1)) {
struct eb32_node *node;
@ -1970,28 +1949,30 @@ static int promex_parse_uri(struct appctx *appctx)
ctx->flags |= (default_scopes | default_metrics_filter);
return 1;
bad_req_error:
error:
err = &http_err_chunks[HTTP_ERR_400];
goto error;
channel_erase(res);
res->buf.data = b_data(err);
memcpy(res->buf.area, b_head(err), b_data(err));
res_htx = htx_from_buf(&res->buf);
channel_add_input(res, res_htx->data);
return -1;
internal_error:
err = &http_err_chunks[HTTP_ERR_500];
goto error;
error:
outbuf = DISGUISE(applet_get_outbuf(appctx));
b_reset(outbuf);
outbuf->data = b_data(err);
memcpy(outbuf->area, b_head(err), b_data(err));
applet_set_eoi(appctx);
applet_set_eos(appctx);
err = &http_err_chunks[HTTP_ERR_400];
channel_erase(res);
res->buf.data = b_data(err);
memcpy(res->buf.area, b_head(err), b_data(err));
res_htx = htx_from_buf(&res->buf);
channel_add_input(res, res_htx->data);
return -1;
}
/* Send HTTP headers of the response. It returns 1 on success and 0 if <htx> is
* full. */
static int promex_send_headers(struct appctx *appctx, struct htx *htx)
static int promex_send_headers(struct appctx *appctx, struct stconn *sc, struct htx *htx)
{
struct channel *chn = sc_ic(sc);
struct htx_sl *sl;
unsigned int flags;
@ -2006,10 +1987,11 @@ static int promex_send_headers(struct appctx *appctx, struct htx *htx)
!htx_add_endof(htx, HTX_BLK_EOH))
goto full;
channel_add_input(chn, htx->data);
return 1;
full:
htx_reset(htx);
applet_have_more_data(appctx);
sc_need_room(sc, 0);
return 0;
}
@ -2063,51 +2045,52 @@ static void promex_appctx_release(struct appctx *appctx)
/* The main I/O handler for the promex applet. */
static void promex_appctx_handle_io(struct appctx *appctx)
{
struct promex_ctx *ctx = appctx->svcctx;
struct buffer *outbuf;
struct htx *res_htx;
struct stconn *sc = appctx_sc(appctx);
struct stream *s = __sc_strm(sc);
struct channel *req = sc_oc(sc);
struct channel *res = sc_ic(sc);
struct htx *req_htx, *res_htx;
int ret;
if (unlikely(applet_fl_test(appctx, APPCTX_FL_EOS|APPCTX_FL_ERROR)))
res_htx = htx_from_buf(&res->buf);
if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
goto out;
/* Check if the input buffer is available. */
outbuf = applet_get_outbuf(appctx);
if (outbuf == NULL) {
applet_have_more_data(appctx);
if (!b_size(&res->buf)) {
sc_need_room(sc, 0);
goto out;
}
res_htx = htx_from_buf(outbuf);
switch (appctx->st0) {
case PROMEX_ST_INIT:
if (!applet_get_inbuf(appctx) || !applet_htx_input_data(appctx)) {
if (!co_data(req)) {
applet_need_more_data(appctx);
break;
goto out;
}
ret = promex_parse_uri(appctx);
ret = promex_parse_uri(appctx, sc);
if (ret <= 0) {
if (ret == -1)
applet_set_error(appctx);
break;
goto error;
goto out;
}
appctx->st0 = PROMEX_ST_HEAD;
appctx->st1 = PROMEX_DUMPER_INIT;
__fallthrough;
case PROMEX_ST_HEAD:
if (!promex_send_headers(appctx, res_htx))
break;
appctx->st0 = ((ctx->flags & PROMEX_FL_BODYLESS_RESP) ? PROMEX_ST_DONE : PROMEX_ST_DUMP);
if (!promex_send_headers(appctx, sc, res_htx))
goto out;
appctx->st0 = ((s->txn->meth == HTTP_METH_HEAD) ? PROMEX_ST_DONE : PROMEX_ST_DUMP);
__fallthrough;
case PROMEX_ST_DUMP:
ret = promex_dump_metrics(appctx, res_htx);
ret = promex_dump_metrics(appctx, sc, res_htx);
if (ret <= 0) {
if (ret == -1)
applet_set_error(appctx);
break;
goto error;
goto out;
}
appctx->st0 = PROMEX_ST_DONE;
__fallthrough;
@ -2121,36 +2104,41 @@ static void promex_appctx_handle_io(struct appctx *appctx)
*/
if (htx_is_empty(res_htx)) {
if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
applet_have_more_data(appctx);
break;
sc_need_room(sc, sizeof(struct htx_blk) + 1);
goto out;
}
channel_add_input(res, 1);
}
res_htx->flags |= HTX_FL_EOM;
applet_set_eoi(appctx);
se_fl_set(appctx->sedesc, SE_FL_EOI);
appctx->st0 = PROMEX_ST_END;
__fallthrough;
case PROMEX_ST_END:
applet_set_eos(appctx);
se_fl_set(appctx->sedesc, SE_FL_EOS);
}
htx_to_buf(res_htx, outbuf);
out:
htx_to_buf(res_htx, &res->buf);
/* eat the whole request */
applet_reset_input(appctx);
if (co_data(req)) {
req_htx = htx_from_buf(&req->buf);
co_htx_skip(req, req_htx, co_data(req));
}
return;
error:
se_fl_set(appctx->sedesc, SE_FL_ERROR);
goto out;
}
struct applet promex_applet = {
.obj_type = OBJ_TYPE_APPLET,
.flags = APPLET_FL_NEW_API,
.name = "<PROMEX>", /* used for logging */
.init = promex_appctx_init,
.release = promex_appctx_release,
.fct = promex_appctx_handle_io,
.rcv_buf = appctx_htx_rcv_buf,
.snd_buf = appctx_htx_snd_buf,
};
static enum act_parse_ret service_parse_prometheus_exporter(const char **args, int *cur_arg, struct proxy *px,

View File

@ -123,22 +123,6 @@ struct url_stat {
#define FILT2_PRESERVE_QUERY 0x02
#define FILT2_EXTRACT_CAPTURE 0x04
#define FILT_OUTPUT_FMT (FILT_COUNT_ONLY| \
FILT_COUNT_STATUS| \
FILT_COUNT_SRV_STATUS| \
FILT_COUNT_COOK_CODES| \
FILT_COUNT_TERM_CODES| \
FILT_COUNT_URL_ONLY| \
FILT_COUNT_URL_COUNT| \
FILT_COUNT_URL_ERR| \
FILT_COUNT_URL_TAVG| \
FILT_COUNT_URL_TTOT| \
FILT_COUNT_URL_TAVGO| \
FILT_COUNT_URL_TTOTO| \
FILT_COUNT_URL_BAVG| \
FILT_COUNT_URL_BTOT| \
FILT_COUNT_IP_COUNT)
unsigned int filter = 0;
unsigned int filter2 = 0;
unsigned int filter_invert = 0;
@ -208,7 +192,7 @@ void help()
" you can also use -n to start from earlier then field %d\n"
" -query preserve the query string for per-URL (-u*) statistics\n"
"\n"
"Output format - **only one** may be used at a time\n"
"Output format - only one may be used at a time\n"
" -c only report the number of lines that would have been printed\n"
" -pct output connect and response times percentiles\n"
" -st output number of requests per HTTP status code\n"
@ -914,9 +898,6 @@ int main(int argc, char **argv)
if (!filter && !filter2)
die("No action specified.\n");
if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
die("Please, set only one output filter.\n");
if (filter & FILT_ACC_COUNT && !filter_acc_count)
filter_acc_count=1;

View File

@ -1,19 +0,0 @@
# show non-null memprofile entries with method, alloc/free counts/tot and caller
define memprof_dump
set $i = 0
set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
set $m = memprof_stats[$i].method
printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
output/a memprof_stats[$i].caller
printf "\n"
end
set $i = $i + 1
end
end

View File

@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.3-dev, and maintenance branches are 3.2 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.

View File

@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.2 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:

BIN
dev/phash/a.out Executable file

Binary file not shown.

View File

@ -3,9 +3,7 @@ DeviceAtlas Device Detection
In order to add DeviceAtlas Device Detection support, you would need to download
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
Once extracted, two modes are supported :
1/ Build HAProxy and DeviceAtlas in one command
Once extracted :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
@ -16,6 +14,10 @@ directory. Also, in the case the api cache support is not needed and/or a C++ to
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
However, if the API had been installed beforehand, DEVICEATLAS_SRC
can be omitted. Note that the DeviceAtlas C API version supported is from the 3.x
releases series (3.2.1 minimum recommended).
For HAProxy developers who need to verify that their changes didn't accidentally
break the DeviceAtlas code, it is possible to build a dummy library provided in
the addons/deviceatlas/dummy directory and to use it as an alternative for the
@ -25,29 +27,6 @@ validate API changes :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
2/ Build and install DeviceAtlas according to https://docs.deviceatlas.com/apis/enterprise/c/<release version>/README.html
For example :
In the deviceatlas library folder :
$ cmake .
$ make
$ sudo make install
In the HAProxy folder :
$ make TARGET=<target> USE_DEVICEATLAS=1
Note that if the -DCMAKE_INSTALL_PREFIX cmake option had been used, it is necessary to set as well DEVICEATLAS_LIB and
DEVICEATLAS_INC as follow :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=<CMAKE_INSTALL_PREFIX value>/include DEVICEATLAS_LIB=<CMAKE_INSTALL_PREFIX value>/lib
For example :
$ cmake -DCMAKE_INSTALL_PREFIX=/opt/local
$ make
$ sudo make install
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=/opt/local/include DEVICEATLAS_LIB=/opt/local/lib
Note that DEVICEATLAS_SRC is omitted in this case.
These are supported DeviceAtlas directives (see doc/configuration.txt) :
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
- deviceatlas-log-level <number> (0 to 3, level of information returned by

File diff suppressed because it is too large Load Diff

View File

@ -204,14 +204,6 @@ the cache, when this option is set, objects are picked from the cache from the
oldest one instead of the freshest one. This way even late memory corruptions
have a chance to be detected.
Another non-destructive approach is to use "-dMbackup". A full copy of the
object is made after its end, which eases inspection (e.g. of the parts
scratched by the pool_item elements), and a comparison is made upon allocation
of that object, just like with "-dMintegrity", causing a crash on mismatch. The
initial 4 words corresponding to the list are ignored as well. Note that when
both "-dMbackup" and "-dMintegrity" are used, the copy is performed before
being scratched, and the comparison is done by "-dMintegrity" only.
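As a purely illustrative command line (the configuration file name is arbitrary,
and the comma-separated -dM syntax is the one documented in the management
guide), these options are typically combined like this:

  $ haproxy -f haproxy.cfg -dMintegrity,backup,no-merge,cold-first,tag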
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
is passed on the executable's command line, pool objects are allocated with
one extra pointer compared to the requested size, so that the bytes that follow
@ -350,9 +342,7 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
"-dMno-merge" is passed on the executable's command line, the pools
also need to have the exact same name to be merged. In addition, unless
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
per-pool basis to enable the UAF detection only for this specific pool,
saving the massive overhead of global usage. The name that will appear
up to the size of pointers (16 or 32 bytes). The name that will appear
in the pool upon merging is the name of the first created pool. The
returned pointer is the new (or reused) pool head, or NULL upon error.
Pools created this way must be destroyed using pool_destroy().
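As a quick sketch of the API described above (the my_ctx structure and its
wrappers are invented for illustration, and the usual pool_alloc()/pool_free()
helpers are assumed rather than taken from this diff), a subsystem typically
declares and uses a pool like this:

  #include <haproxy/pool.h>

  /* purely illustrative per-stream context */
  struct my_ctx {
          int state;
          char token[32];
  };

  static struct pool_head *pool_head_my_ctx;

  static int my_ctx_prepare(void)
  {
          /* merged with same-sized pools unless "no-merge" is set; MEM_F_UAF
           * could be passed instead to enable UAF detection for this pool only
           */
          pool_head_my_ctx = create_pool("my_ctx", sizeof(struct my_ctx), MEM_F_SHARED);
          return pool_head_my_ctx != NULL;
  }

  static struct my_ctx *my_ctx_new(void)
  {
          struct my_ctx *ctx = pool_alloc(pool_head_my_ctx);

          if (ctx)
                  ctx->state = 0;
          return ctx;
  }

  static void my_ctx_release(struct my_ctx *ctx)
  {
          pool_free(pool_head_my_ctx, ctx);
  }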

View File

@ -21,7 +21,7 @@ falls back to CLOCK_REALTIME. The former is more accurate as it really counts
the time spent in the process, while the latter might also account for time
stuck on paging in etc.
Then wdt_ping() is called to arm the timer. It's set to trigger every
Then wdt_ping() is called to arm the timer. t's set to trigger every
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
to reprogram a new wakeup after it has ticked.
@ -37,18 +37,15 @@ If the thread was not marked as stuck, it's verified that no progress was made
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
progress is measured by the distance between the thread's current cpu_time and
its prev_cpu_time. If the lack of progress is at least as large as the warning
threshold, then the signal is bounced to the faulty thread if it's not the
current one. Since this bounce is based on the time spent without update, it
already doesn't happen often.
threshold and no context switch happened since last call, ha_stuck_warning() is
called to emit a warning about that thread. In any case the context switch
counter for that thread is updated.
Once on the faulty thread, two checks are performed:
1) if the thread was already marked as stuck, then the thread is considered
as definitely stuck, and ha_panic() is called. It will not return.
2) a check is made to verify if the scheduler is still ticking, by reading
and setting a variable that only the scheduler can clear when leaving a
task. If the scheduler didn't make any progress, ha_stuck_warning() is
called to emit a warning about that thread.
If the thread was already marked as stuck, then the thread is considered as
definitely stuck. Then ha_panic() is directly called if the thread is the
current one, otherwise ha_kill() is used to resend the signal directly to the
target thread, which will in turn go through this handler and handle the panic
itself.
Most of the time there's no panic of course, and a wdt_ping() is performed
before leaving the handler to reprogram a check for that thread.
@ -64,12 +61,12 @@ set TAINTED_WARN_BLOCKED_TRAFFIC.
ha_panic() uses the current thread's trash buffer to produce the messages, as
we don't care about its contents since that thread will never return. However
ha_stuck_warning() instead uses a local 8kB buffer in the thread's stack.
ha_stuck_warning() instead uses a local 4kB buffer in the thread's stack.
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
buffer being filled with each thread's dump messages. ha_stuck_warning() only
calls ha_thread_dump_one(), which works on the current thread. In both cases
the message is then directly sent to fd #2 (stderr) and ha_thread_dump_done()
is called to release the dumped thread.
calls the function for the current thread. In both cases the message is then
directly sent to fd #2 (stderr) and ha_thread_dump_one() is called to release
the dumped thread.
Both print a few extra messages, but ha_panic() just ends by looping on abort()
until the process dies.
@ -113,19 +110,13 @@ ha_dump_backtrace() before returning.
ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
then dumps the code bytes nearby the crashing instruction, dumps pointers and
tries to resolve function names, and sends all of that into the target buffer.
On some architectures (x86_64, arm64), it will also try to detect and decode
call instructions and resolve them to called functions.
3. Improvements
---------------
The symbols resolution is extremely expensive, particularly for the warnings
which should be fast. But we need it, it's just unfortunate that it strikes at
the wrong moment. At least ha_dump_backtrace() does disable signals while it's
resolving, in order to avoid unwanted re-entrance. In addition, the called
function resolve_sym_name() uses some locking and refrains from calling the
dladdr family of functions in a re-entrant way (in the worst case only well
known symbols will be resolved).
the wrong moment.
In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
which would then later be resolved asynchronously in a tasklet. This can work

View File

@ -1,7 +1,7 @@
-----------------------
HAProxy Starter Guide
-----------------------
version 3.3
version 3.2
This document is an introduction to HAProxy for all those who don't know it, as

View File

@ -893,9 +893,7 @@ Core class
**context**: init, task, action
This function returns a new object of a *httpclient* class. An *httpclient*
object must be used to process one and only one request. It must never be
reused to process several requests.
This function returns a new object of a *httpclient* class.
:returns: A :ref:`httpclient_class` object.
@ -935,7 +933,7 @@ Core class
Give back the hand at the HAProxy scheduler. Unlike :js:func:`core.yield`
the task will not be woken up automatically to resume as fast as possible.
Instead, it will wait for an event to wake the task. If milliseconds argument
is provided then the Lua execution will be automatically resumed passed this
is provided then the Lua excecution will be automatically resumed passed this
delay even if no event caused the task to wake itself up.
:param integer milliseconds: automatic wakeup passed this delay. (optional)
@ -945,7 +943,7 @@ Core class
**context**: task, action
Give back the hand at the HAProxy scheduler. It is used when the LUA
processing consumes a lot of processing time. Lua execution will be resumed
processing consumes a lot of processing time. Lua excecution will be resumed
automatically (automatic reschedule).
.. js:function:: core.parse_addr(address)
@ -1089,13 +1087,18 @@ Core class
perform the heavy job in a dedicated task and allow remaining events to be
processed more quickly.
.. js:function:: core.use_native_mailers_config()
.. js:function:: core.disable_legacy_mailers()
**context**: body
**LEGACY**
Inform haproxy that the script will make use of the native "mailers"
config section (although legacy). In other words, inform haproxy that
:js:func:`Proxy.get_mailers()` will be used later in the program.
**context**: body, init
Disable the sending of email alerts through the legacy email sending
function when mailers are used in the configuration.
Use this when sending email alerts directly from lua.
:see: :js:func:`Proxy.get_mailers()`
.. _proxy_class:
@ -1224,14 +1227,8 @@ Proxy class
**LEGACY**
Returns a table containing legacy mailers config (from haproxy configuration
file) for the current proxy or nil if mailers are not available for the proxy.
.. warning::
When relying on :js:func:`Proxy.get_mailers()` to retrieve mailers
configuration, :js:func:`core.use_native_mailers_config()` must be called
first from body or init context to inform haproxy that Lua makes use of the
legacy mailers config.
Returns a table containing mailers config for the current proxy or nil
if mailers are not available for the proxy.
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
proxy.
@ -1248,6 +1245,10 @@ ProxyMailers class
This class provides mailers config for a given proxy.
If sending emails directly from lua, please consider
:js:func:`core.disable_legacy_mailers()` to disable the email sending from
haproxy. (Or email alerts will be sent twice...)
.. js:attribute:: ProxyMailers.track_server_health
Boolean set to true if the option "log-health-checks" is configured on
@ -2580,9 +2581,7 @@ HTTPClient class
.. js:class:: HTTPClient
The httpclient class allows issue of outbound HTTP requests through a simple
API without the knowledge of HAProxy internals. Any instance must be used to
process one and only one request. It must never be reused to process several
requests.
API without the knowledge of HAProxy internals.
.. js:function:: HTTPClient.get(httpclient, request)
.. js:function:: HTTPClient.head(httpclient, request)
@ -3917,25 +3916,21 @@ AppletTCP class
*size* is missing, the function tries to read all the content of the stream
until the end. An optional timeout may be specified in milliseconds. In this
case the function will return no longer than this delay, with the amount of
available data, or nil if there is no data. An empty string is returned if the
connection is closed.
available data (possibly none).
:param class_AppletTCP applet: An :ref:`applettcp_class`
:param integer size: the required read size.
:returns: return nil if the timeout has expired and no data was available but
can still be received. Otherwise, a string is returned, possibly an empty
string if the connection is closed.
:returns: always return a string, the string can be empty if the connection is
closed.
.. js:function:: AppletTCP.try_receive(applet)
Reads available data from the TCP stream and returns immediately. Returns a
string containing read bytes or nil if no bytes are available at that time. An
empty string is returned if the connection is closed.
string containing read bytes that may possibly be empty if no bytes are
available at that time.
:param class_AppletTCP applet: An :ref:`applettcp_class`
:returns: return nil if no data was available but can still be
received. Otherwise, a string is returned, possibly an empty string if the
connection is closed.
:returns: always return a string, the string can be empty.
.. js:function:: AppletTCP.send(appletmsg)
@ -4612,27 +4607,6 @@ HTTPMessage class
data by default.
:returns: an integer containing the amount of bytes copied or -1.
.. js:function:: HTTPMessage.set_body_len(http_msg, length)
This function changes the expected payload length of the HTTP message
**http_msg**. **length** can be an integer value. In that case, a
"Content-Length" header is added with the given value. It is also possible to
pass the **"chunked"** string instead of an integer value to force the HTTP
message to be chunk-encoded. In that case, a "Transfer-Encoding" header is
added with the "chunked" value. In both cases, all existing "Content-Length"
and "Transfer-Encoding" headers are removed.
This function should be used in the filter context to be able to alter the
payload of the HTTP message. The internal state of the HTTP message is updated
accordingly. :js:func:`HTTPMessage.add_header()` or
:js:func:`HTTPMessage.set_header()` functions must be used in that case.
:param class_httpmessage http_msg: The manipulated HTTP message.
:param type length: The new payload length to set. It can be an integer or
the string "chunked".
:returns: true if the payload length was successfully updated, false
otherwise.
.. js:function:: HTTPMessage.set_eom(http_msg)
This function set the end of message for the HTTP message **http_msg**.

View File

@ -1,7 +1,7 @@
------------------------
HAProxy Management Guide
------------------------
version 3.3
version 3.2
This document describes how to start, stop, manage, and troubleshoot HAProxy,
@ -325,16 +325,6 @@ list of options is :
last released. This works best with "no-merge", "cold-first" and "tag".
Enabling this option will slightly increase the CPU usage.
- backup / no-backup:
This option performs a copy of each released object at release time,
allowing developers to inspect them. It also performs a comparison at
allocation time to detect if anything changed in between, indicating a
use-after-free condition. This doubles the memory usage and slightly
increases the CPU usage (similar to "integrity"). If combined with
"integrity", it still duplicates the contents but doesn't perform the
comparison (which is performed by "integrity"). Just like "integrity",
it works best with "no-merge", "cold-first" and "tag".
- no-global / global:
Depending on the operating system, a process-wide global memory cache
may be enabled if it is estimated that the standard allocator is too
@ -1346,10 +1336,9 @@ The first column designates the object or metric being dumped. Its format is
specific to the command producing this output and will not be described in this
section. Usually it will consist in a series of identifiers and field names.
The second column contains 4 characters respectively indicating the origin, the
nature, the scope and the persistence state of the value being reported. The
first character (the origin) indicates where the value was extracted from.
Possible characters are :
The second column contains 3 characters respectively indicating the origin, the
nature and the scope of the value being reported. The first character (the
origin) indicates where the value was extracted from. Possible characters are :
M The value is a metric. It is valid at one instant and may change depending
on its nature.
@ -1465,16 +1454,7 @@ characters are currently supported :
current date or resource usage. At the moment this scope is not used by
any metric.
The fourth character (persistence state) indicates whether the value (the metric)
is volatile or persistent across reloads. The following characters are expected :
V The metric is volatile because it is local to the current process so
the value will be lost when reloading.
P The metric is persistent because it may be shared with other co-processes
so that the value is preserved across reloads.
Consumers of this information will generally have enough of these 4 characters
Consumers of this information will generally have enough of these 3 characters
to determine how to accurately report aggregated information across multiple
processes.
@ -1663,8 +1643,8 @@ abort ssl crl-file <crlfile>
acme renew <certificate>
Starts an ACME certificate generation task with the given certificate name.
The certificate must be linked to an acme section, see section 12.8 "ACME"
of the configuration manual. See also "acme status".
The certificate must be linked to an acme section, see section 3.13. of the
configuration manual. See also "acme status".
acme status
Show the status of every certificate that was configured with ACME.
@ -1672,10 +1652,10 @@ acme status
This command outputs, separated by a tab:
- The name of the certificate configured in haproxy
- The acme section used in the configuration
- The state of the acme task, either "Running", "Scheduled" or "Stopped"
- The state of the acme task, either "Running" or "Scheduled"
- The UTC expiration date of the certificate in ISO8601 format
- The relative expiration time (0d if expired)
- The UTC scheduled date of the certificate in ISO8601 format
- The UTC expiration date of the certificate in ISO8601 format
- The relative schedule time (0d if Running)
Example:
@ -1734,9 +1714,8 @@ add server <backend>/<server> [args]*
The <server> name must not be already used in the backend. A special
restriction is put on the backend which must used a dynamic load-balancing
algorithm. A subset of keywords from the server config file statement can be
used to configure the server behavior (see "add server help" to list them).
Also note that no settings will be reused from an hypothetical
'default-server' statement in the same backend.
used to configure the server behavior. Also note that no settings will be
reused from an hypothetical 'default-server' statement in the same backend.
Currently a dynamic server is statically initialized with the "none"
init-addr method. This means that no resolution will be undertaken if a FQDN
@ -1766,10 +1745,78 @@ add server <backend>/<server> [args]*
servers. Please refer to the "u-limit" global keyword documentation in this
case.
add server help
List the keywords supported for dynamic servers by the current haproxy
version. Keyword syntax is similar to the server line from the configuration
file, please refer to their individual documentation for details.
Here is the list of the currently supported keywords :
- agent-addr
- agent-check
- agent-inter
- agent-port
- agent-send
- allow-0rtt
- alpn
- addr
- backup
- ca-file
- check
- check-alpn
- check-proto
- check-send-proxy
- check-sni
- check-ssl
- check-via-socks4
- ciphers
- ciphersuites
- cookie
- crl-file
- crt
- disabled
- downinter
- error-limit
- fall
- fastinter
- force-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
- id
- init-state
- inter
- maxconn
- maxqueue
- minconn
- no-ssl-reuse
- no-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
- no-tls-tickets
- npn
- observe
- on-error
- on-marked-down
- on-marked-up
- pool-low-conn
- pool-max-conn
- pool-purge-delay
- port
- proto
- proxy-v2-options
- rise
- send-proxy
- send-proxy-v2
- send-proxy-v2-ssl
- send-proxy-v2-ssl-cn
- slowstart
- sni
- source
- ssl
- ssl-max-ver
- ssl-min-ver
- tfo
- tls-tickets
- track
- usesrc
- verify
- verifyhost
- weight
- ws
Their syntax is similar to the server line from the configuration file,
please refer to their individual documentation for details.
add ssl ca-file <cafile> <payload>
Add a new certificate to a ca-file. This command is useful when you reached
@ -2309,7 +2356,7 @@ help [<command>]
the requested one. The same help screen is also displayed for unknown
commands.
httpclient [--htx] <method> <URI>
httpclient <method> <URI>
Launch an HTTP client request and print the response on the CLI. Only
supported on a CLI connection running in expert mode (see "expert-mode on").
It's only meant for debugging. The httpclient is able to resolve a server
@ -2318,9 +2365,6 @@ httpclient [--htx] <method> <URI>
able to resolve a host from /etc/hosts if you don't use a local dns daemon
which can resolve those.
The --htx option allows using the haproxy internal htx representation through
the htx_dump() function, mainly used for debugging.
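A hypothetical invocation over a stats socket (the socket path and URI are
only examples) looks like this:

   $ echo "expert-mode on; httpclient --htx GET http://127.0.0.1:8000/" | \
       socat stdio unix-connect:/tmp/sock1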
new ssl ca-file <cafile>
Create a new empty CA file tree entry to be filled with a set of CA
certificates and added to a crt-list. This command should be used in
@ -2371,7 +2415,7 @@ prompt [help | n | i | p | timed]*
Without any option, this will cycle through prompt mode then non-interactive
mode. In non-interactive mode, the connection is closed after the last
command of the current line completes. In interactive mode, the connection is
command of the current line compltes. In interactive mode, the connection is
not closed after a command completes, so that a new one can be entered. In
prompt mode, the interactive mode is still in use, and a prompt will appear
at the beginning of the line, indicating to the user that the interpreter is
@ -3002,19 +3046,18 @@ show info [typed|json] [desc] [float]
(...)
> show info typed
0.Name.1:POSV:str:HAProxy
1.Version.1:POSV:str:3.1-dev0-7c653d-2466
2.Release_date.1:POSV:str:2025/07/01
3.Nbthread.1:CGSV:u32:1
4.Nbproc.1:CGSV:u32:1
5.Process_num.1:KGPV:u32:1
6.Pid.1:SGPV:u32:638069
7.Uptime.1:MDPV:str:0d 0h00m07s
8.Uptime_sec.1:MDPV:u32:7
9.Memmax_MB.1:CLPV:u32:0
10.PoolAlloc_MB.1:MGPV:u32:0
11.PoolUsed_MB.1:MGPV:u32:0
12.PoolFailed.1:MCPV:u32:0
0.Name.1:POS:str:HAProxy
1.Version.1:POS:str:1.7-dev1-de52ea-146
2.Release_date.1:POS:str:2016/03/11
3.Nbproc.1:CGS:u32:1
4.Process_num.1:KGP:u32:1
5.Pid.1:SGP:u32:28105
6.Uptime.1:MDP:str:0d 0h00m08s
7.Uptime_sec.1:MDP:u32:8
8.Memmax_MB.1:CLP:u32:0
9.PoolAlloc_MB.1:MGP:u32:0
10.PoolUsed_MB.1:MGP:u32:0
11.PoolFailed.1:MCP:u32:0
(...)
In the typed format, the presence of the process ID at the end of the
@ -3221,11 +3264,11 @@ show quic [<format>] [<filter>]
An optional argument can be specified to control the verbosity. Its value can
be interpreted in different ways. The first possibility is to use predefined
values, "oneline" for the default format, "stream" to list every active
streams and "full" to display all information. Alternatively, a list of
comma-delimited fields can be specified to restrict output. Currently
supported values are "tp", "sock", "pktns", "cc" and "mux". Finally, "help"
in the format will instead show a more detailed help message.
values, "oneline" for the default format and "full" to display all
information. Alternatively, a list of comma-delimited fields can be specified
to restrict output. Currently supported values are "tp", "sock", "pktns",
"cc" and "mux". Finally, "help" in the format will instead show a more
detailed help message.
The final argument is used to restrict or extend the connection list. By
default, connections on closing or draining state are not displayed. Use the
@ -3240,29 +3283,7 @@ show servers conn [<backend>]
The output consists in a header line showing the field titles, then one
server per line with for each, the backend name and ID, server name and ID,
the address, port and a series of values. The number of fields varies
depending on thread count. The exact format of the output may vary slightly
across versions and depending on the number of threads. One needs to pay
attention to the header line to match columns when extracting output values,
and to the number of threads as the last columns are per-thread:
bkname/svname Backend name '/' server name
bkid/svid Backend ID '/' server ID
addr Server's IP address
port Server's port (or zero if none)
- Unused field, serves as a visual delimiter
purge_delay Interval between connection purges, in milliseconds
used_cur Number of connections currently in use
used_max Highest value of used_cur since the process started
need_est Floating estimate of total needed connections
unsafe_nb Number of idle connections considered as "unsafe"
safe_nb Number of idle connections considered as "safe"
idle_lim Configured maximum number of idle connections
idle_cur Total of the per-thread currently idle connections
idle_per_thr[NB] Idle conns per thread for each one of the NB threads
HAProxy will kill a portion of <idle_cur> every <purge_delay> when the total
of <idle_cur> + <used_cur> exceeds the estimate <need_est>. This estimate
varies based on connection activity.
depending on thread count.
Given the threaded nature of idle connections, it's important to understand
that some values may change once read, and that as such, consistency within a
@ -3495,11 +3516,10 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
The rest of the line starting after the first colon follows the "typed output
format" described in the section above. In short, the second column (after the
first ':') indicates the origin, nature, scope and persistence state of the
variable. The third column indicates the field type, among "s32", "s64",
"u32", "u64", "flt' and "str". Then the fourth column is the value itself,
which the consumer knows how to parse thanks to column 3 and how to process
thanks to column 2.
first ':') indicates the origin, nature and scope of the variable. The third
column indicates the field type, among "s32", "s64", "u32", "u64", "flt" and
"str". Then the fourth column is the value itself, which the consumer knows
how to parse thanks to column 3 and how to process thanks to column 2.
When "desc" is appended to the command, one extra colon followed by a quoted
string is appended with a description for the metric. At the time of writing,
@ -3512,32 +3532,37 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
Here's an example of typed output format :
$ echo "show stat typed" | socat stdio unix-connect:/tmp/sock1
F.2.0.0.pxname.1:KNSV:str:dummy
F.2.0.1.svname.1:KNSV:str:FRONTEND
F.2.0.4.scur.1:MGPV:u32:0
F.2.0.5.smax.1:MMPV:u32:0
F.2.0.6.slim.1:CLPV:u32:524269
F.2.0.7.stot.1:MCPP:u64:0
F.2.0.8.bin.1:MCPP:u64:0
F.2.0.9.bout.1:MCPP:u64:0
F.2.0.10.dreq.1:MCPP:u64:0
F.2.0.11.dresp.1:MCPP:u64:0
F.2.0.12.ereq.1:MCPP:u64:0
F.2.0.17.status.1:SGPV:str:OPEN
F.2.0.26.pid.1:KGPV:u32:1
F.2.0.27.iid.1:KGSV:u32:2
F.2.0.28.sid.1:KGSV:u32:0
F.2.0.32.type.1:CGSV:u32:0
F.2.0.33.rate.1:MRPP:u32:0
F.2.0.34.rate_lim.1:CLPV:u32:0
F.2.0.35.rate_max.1:MMPV:u32:0
F.2.0.46.req_rate.1:MRPP:u32:0
F.2.0.47.req_rate_max.1:MMPV:u32:0
F.2.0.48.req_tot.1:MCPP:u64:0
F.2.0.51.comp_in.1:MCPP:u64:0
F.2.0.52.comp_out.1:MCPP:u64:0
F.2.0.53.comp_byp.1:MCPP:u64:0
F.2.0.54.comp_rsp.1:MCPP:u64:0
F.2.0.0.pxname.1:MGP:str:private-frontend
F.2.0.1.svname.1:MGP:str:FRONTEND
F.2.0.8.bin.1:MGP:u64:0
F.2.0.9.bout.1:MGP:u64:0
F.2.0.40.hrsp_2xx.1:MGP:u64:0
L.2.1.0.pxname.1:MGP:str:private-frontend
L.2.1.1.svname.1:MGP:str:sock-1
L.2.1.17.status.1:MGP:str:OPEN
L.2.1.73.addr.1:MGP:str:0.0.0.0:8001
S.3.13.60.rtime.1:MCP:u32:0
S.3.13.61.ttime.1:MCP:u32:0
S.3.13.62.agent_status.1:MGP:str:L4TOUT
S.3.13.64.agent_duration.1:MGP:u64:2001
S.3.13.65.check_desc.1:MCP:str:Layer4 timeout
S.3.13.66.agent_desc.1:MCP:str:Layer4 timeout
S.3.13.67.check_rise.1:MCP:u32:2
S.3.13.68.check_fall.1:MCP:u32:3
S.3.13.69.check_health.1:SGP:u32:0
S.3.13.70.agent_rise.1:MaP:u32:1
S.3.13.71.agent_fall.1:SGP:u32:1
S.3.13.72.agent_health.1:SGP:u32:1
S.3.13.73.addr.1:MCP:str:1.255.255.255:8888
S.3.13.75.mode.1:MAP:str:http
B.3.0.0.pxname.1:MGP:str:private-backend
B.3.0.1.svname.1:MGP:str:BACKEND
B.3.0.2.qcur.1:MGP:u32:0
B.3.0.3.qmax.1:MGP:u32:0
B.3.0.4.scur.1:MGP:u32:0
B.3.0.5.smax.1:MGP:u32:0
B.3.0.6.slim.1:MGP:u32:1000
B.3.0.55.lastsess.1:MMP:s32:-1
(...)
In the typed format, the presence of the process ID at the end of the
@ -3548,20 +3573,20 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
$ ( echo show stat typed | socat /var/run/haproxy.sock1 - ; \
echo show stat typed | socat /var/run/haproxy.sock2 - ) | \
sort -t . -k 1,1 -k 2,2n -k 3,3n -k 4,4n -k 5,5 -k 6,6n
B.3.0.0.pxname.1:KNSV:str:private-backend
B.3.0.0.pxname.2:KNSV:str:private-backend
B.3.0.1.svname.1:KNSV:str:BACKEND
B.3.0.1.svname.2:KNSV:str:BACKEND
B.3.0.2.qcur.1:MGPV:u32:0
B.3.0.2.qcur.2:MGPV:u32:0
B.3.0.3.qmax.1:MMPV:u32:0
B.3.0.3.qmax.2:MMPV:u32:0
B.3.0.4.scur.1:MGPV:u32:0
B.3.0.4.scur.2:MGPV:u32:0
B.3.0.5.smax.1:MMPV:u32:0
B.3.0.5.smax.2:MMPV:u32:0
B.3.0.6.slim.1:CLPV:u32:1000
B.3.0.6.slim.2:CLPV:u32:1000
B.3.0.0.pxname.1:MGP:str:private-backend
B.3.0.0.pxname.2:MGP:str:private-backend
B.3.0.1.svname.1:MGP:str:BACKEND
B.3.0.1.svname.2:MGP:str:BACKEND
B.3.0.2.qcur.1:MGP:u32:0
B.3.0.2.qcur.2:MGP:u32:0
B.3.0.3.qmax.1:MGP:u32:0
B.3.0.3.qmax.2:MGP:u32:0
B.3.0.4.scur.1:MGP:u32:0
B.3.0.4.scur.2:MGP:u32:0
B.3.0.5.smax.1:MGP:u32:0
B.3.0.5.smax.2:MGP:u32:0
B.3.0.6.slim.1:MGP:u32:1000
B.3.0.6.slim.2:MGP:u32:1000
(...)
The format of JSON output is described in a schema which may be output
@ -4546,6 +4571,9 @@ show proc [debug]
1271 worker 1 0d00h00m00s 2.5-dev13
# old workers
1233 worker 3 0d00h00m43s 2.0-dev3-6019f6-289
# programs
1244 foo 0 0d00h00m00s -
1255 bar 0 0d00h00m00s -
In this example, the master has been reloaded 5 times but one of the old
worker is still running and survived 3 reloads. You could access the CLI of

View File

@ -3,7 +3,7 @@
-- Provides a pure lua alternative to tcpcheck mailers.
--
-- To be loaded using "lua-load" from haproxy configuration to handle
-- email-alerts directly from lua
-- email-alerts directly from lua and disable legacy tcpcheck implementation.
local SYSLOG_LEVEL = {
["EMERG"] = 0,
@ -364,9 +364,9 @@ local function srv_event_add(event, data)
mailers_track_server_events(data.reference)
end
-- tell haproxy that we do use the legacy native "mailers" config section
-- which allows us to retrieve mailers configuration using Proxy:get_mailers()
core.use_native_mailers_config()
-- disable legacy email-alerts since email-alerts will be sent from lua directly
core.disable_legacy_mailers()
-- event subscriptions are purposely performed in an init function to prevent
-- email alerts from being generated too early (when process is starting up)

View File

@ -112,7 +112,7 @@ local function rotate_piece(piece, piece_id, px, py, board)
end
function render(applet, board, piece, piece_id, px, py, score)
local output = cursor_home
local output = clear_screen .. cursor_home
output = output .. game_name .. " - Lines: " .. score .. "\r\n"
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
for y = 1, board_height do
@ -160,7 +160,6 @@ function handler(applet)
end
applet:send(cursor_hide)
applet:send(clear_screen)
-- fall the piece by one line every delay
local function fall_piece()
@ -215,7 +214,7 @@ function handler(applet)
local input = applet:receive(1, delay)
if input then
if input == "" or input == "q" then
if input == "q" then
game_over = true
elseif input == "\27" then
local a = applet:receive(1, delay)

View File

@ -31,7 +31,7 @@ struct acme_cfg {
};
enum acme_st {
ACME_RESOURCES = 0,
ACME_RESSOURCES = 0,
ACME_NEWNONCE,
ACME_CHKACCOUNT,
ACME_NEWACCOUNT,
@ -51,11 +51,9 @@ enum http_st {
};
struct acme_auth {
struct ist dns; /* dns entry */
struct ist auth; /* auth URI */
struct ist chall; /* challenge URI */
struct ist token; /* token */
int ready; /* is the challenge ready ? */
void *next;
};
@ -72,7 +70,7 @@ struct acme_ctx {
struct ist newNonce;
struct ist newAccount;
struct ist newOrder;
} resources;
} ressources;
struct ist nonce;
struct ist kid;
struct ist order;
@ -81,20 +79,6 @@ struct acme_ctx {
X509_REQ *req;
struct ist finalize;
struct ist certificate;
struct task *task;
struct mt_list el;
};
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
#define ACME_EV_NEW (1ULL << 1) /* new task */
#define ACME_EV_TASK (1ULL << 2) /* Task handler */
#define ACME_EV_REQ (1ULL << 3) /* HTTP Request */
#define ACME_EV_RES (1ULL << 4) /* HTTP Response */
#define ACME_VERB_CLEAN 1
#define ACME_VERB_MINIMAL 2
#define ACME_VERB_SIMPLE 3
#define ACME_VERB_ADVANCED 4
#define ACME_VERB_COMPLETE 5
#endif

View File

@ -66,8 +66,7 @@ enum act_parse_ret {
enum act_opt {
ACT_OPT_NONE = 0x00000000, /* no flag */
ACT_OPT_FINAL = 0x00000001, /* last call, cannot yield */
ACT_OPT_FINAL_EARLY = 0x00000002, /* set in addition to ACT_OPT_FINAL if last call occurs earlier than normal due to unexpected IO/error */
ACT_OPT_FIRST = 0x00000004, /* first call for this action */
ACT_OPT_FIRST = 0x00000002, /* first call for this action */
};
/* Flags used to describe the action. */

View File

@ -81,13 +81,9 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
#undef _
}
#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
#define APPLET_FL_WARNED 0x00000002 /* Set when warning was already emitted about a legacy applet */
/* Applet descriptor */
struct applet {
enum obj_type obj_type; /* object type = OBJ_TYPE_APPLET */
unsigned int flags; /* APPLET_FL_* flags */
/* 3 unused bytes here */
char *name; /* applet's name to report in logs */
int (*init)(struct appctx *); /* callback to init resources, may be NULL.

View File

@ -116,7 +116,7 @@ static inline int appctx_init(struct appctx *appctx)
* the appctx will be fully initialized. The session and the stream will
* eventually be created. The affinity must be set now !
*/
BUG_ON(appctx->t->tid != -1 && appctx->t->tid != tid);
BUG_ON(appctx->t->tid != tid);
task_set_thread(appctx->t, tid);
if (appctx->applet->init)
@ -282,120 +282,6 @@ static inline void applet_expect_data(struct appctx *appctx)
se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA);
}
/* Returns the buffer containing data pushed to the applet by the stream. For
* applets using their own buffers it is the appctx input buffer. For legacy
* applets, it is the output channel buffer.
*/
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) || !appctx_get_buf(appctx, &appctx->inbuf))
return NULL;
return &appctx->inbuf;
}
else
return sc_ob(appctx_sc(appctx));
}
/* Returns the buffer containing data pushed by the applets to the stream. For
* applets using their own buffers it is the appctx output buffer. For legacy
* applets, it is the input channel buffer.
*/
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC|APPCTX_FL_OUTBLK_FULL) ||
!appctx_get_buf(appctx, &appctx->outbuf))
return NULL;
return &appctx->outbuf;
}
else
return sc_ib(appctx_sc(appctx));
}
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_input_data(const struct appctx *appctx)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return b_data(&appctx->inbuf);
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_htx_input_data(const struct appctx *appctx)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return htx_used_space(htxbuf(&appctx->inbuf));
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
*
* This is useful when data have been read directly from the buffer. It is
* illegal to call this function with <len> causing a wrapping at the end of the
* buffer. It's the caller's responsibility to ensure that <len> is never larger
* than available output data.
*/
static inline void applet_skip_input(struct appctx *appctx, size_t len)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
b_del(&appctx->inbuf, len);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
else
co_skip(sc_oc(appctx_sc(appctx)), len);
}
/* Removes all bytes from the input buffer (see applet_get_inbuf).
*/
static inline void applet_reset_input(struct appctx *appctx)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
b_reset(&appctx->inbuf);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
else
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
}
/* Returns the amount of space available at the output buffer (see applet_get_outbuf).
*/
static inline size_t applet_output_room(const struct appctx *appctx)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return b_room(&appctx->outbuf);
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of space available at the HTX output buffer (see applet_get_outbuf).
*/
static inline size_t applet_htx_output_room(const struct appctx *appctx)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return htx_free_data_space(htxbuf(&appctx->outbuf));
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Indicates that the applet has more data to deliver and needs more room in
* the output buffer to do so (see applet_get_outbuf).
*
* For applets using their own buffers, <room_needed> is not used and only
* <appctx> flags are updated. For legacy applets, the amount of free space
* required must be specified. In this last case, it is the caller
* responsibility to be sure <room_needed> is valid.
*/
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
applet_have_more_data(appctx);
else
sc_need_room(appctx_sc(appctx), room_needed);
}
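/* Illustrative sketch (not part of the diff): how an applet I/O handler
 * could combine applet_get_outbuf(), applet_output_room() and
 * applet_need_room() before emitting a fixed-size record. The handler name
 * my_applet_io() and the 128-byte record size are invented for the example,
 * which assumes <haproxy/applet.h> is included.
 */
static void my_applet_io(struct appctx *appctx)
{
	struct buffer *out = applet_get_outbuf(appctx);

	if (!out) {
		/* output buffer not usable yet (allocation pending or full):
		 * the applet will be woken up again later
		 */
		return;
	}

	if (applet_output_room(appctx) < 128) {
		/* not enough space: flag the need for room and yield */
		applet_need_room(appctx, 128);
		return;
	}

	/* ... at most 128 bytes may now be appended to <out> ... */
}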
/* Should only be used via wrappers applet_putchk() / applet_putchk_stress(). */
static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
int stress)
@ -432,10 +318,9 @@ static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
return ret;
}
/* writes chunk <chunk> into the applet output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes chunk <chunk> into the input channel of the stream attached to this
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
* See ci_putchk() for the list of return codes.
*/
static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk)
{
@ -448,10 +333,9 @@ static inline int applet_putchk_stress(struct appctx *appctx, struct buffer *chu
return _applet_putchk(appctx, chunk, 1);
}
/* writes <len> chars from <blk> into the applet output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes <len> chars from <blk> into the input channel of the stream attached
* to this appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full
* error. See ci_putblk() for the list of return codes.
*/
static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
{
@ -483,11 +367,10 @@ static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
return ret;
}
/* writes chars from <str> up to the trailing zero (excluded) into the applet
* output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes chars from <str> up to the trailing zero (excluded) into the input
* channel of the stream attached to this appctx's endpoint, and marks the
* SC_FL_NEED_ROOM on a channel full error. See ci_putstr() for the list of
* return codes.
*/
static inline int applet_putstr(struct appctx *appctx, const char *str)
{
@ -520,10 +403,9 @@ static inline int applet_putstr(struct appctx *appctx, const char *str)
return ret;
}
/* writes character <chr> into the applet's output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes character <chr> into the input channel of the stream attached to this
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
* See ci_putchr() for the list of return codes.
*/
static inline int applet_putchr(struct appctx *appctx, char chr)
{
@ -556,283 +438,6 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
return ret;
}
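/* Illustrative sketch (not part of the diff): writing a short banner with
 * applet_putstr(). The handler name my_applet_send_banner() and the banner
 * text are invented. Per the comments above, a lack of space is flagged by
 * the helper itself, so the handler simply yields and retries on the next
 * wake-up.
 */
static int my_applet_send_banner(struct appctx *appctx)
{
	if (applet_putstr(appctx, "220 ready\r\n") == -1) {
		/* lack of space or shutdown: nothing was written */
		return 0;
	}
	/* banner fully written, the caller may move to the next state */
	return 1;
}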
static inline int applet_may_get(const struct appctx *appctx, size_t len)
{
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (len > b_data(&appctx->inbuf)) {
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
return -1;
return 0;
}
}
else {
const struct stconn *sc = appctx_sc(appctx);
if ((sc->flags & SC_FL_SHUT_DONE) || len > co_data(sc_oc(sc))) {
if (sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
return -1;
return 0;
}
}
return 1;
}
/* Gets one char from the applet input buffer (see applet_get_inbuf).
*
* Return values :
* 1 : number of bytes read, equal to requested size.
* =0 : not enough data available. <c> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getchar(const struct appctx *appctx, char *c)
{
int ret;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
return ret;
*c = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
? *(b_head(&appctx->inbuf))
: *(co_head(sc_oc(appctx_sc(appctx)))));
return 1;
}
/* Copies one full block of data from the applet input buffer (see
* applet_get_inbuf).
*
* <len> bytes are copied, starting at the offset <offset>.
*
* Return values :
* >0 : number of bytes read, equal to requested size.
* =0 : not enough data available. <blk> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getblk(const struct appctx *appctx, char *blk, int len, int offset)
{
const struct buffer *buf;
int ret;
ret = applet_may_get(appctx, len+offset);
if (ret <= 0)
return ret;
buf = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
? &appctx->inbuf
: sc_ob(appctx_sc(appctx)));
return b_getblk(buf, blk, len, offset);
}
/* Gets one text block representing a word from the applet input buffer (see
* applet_get_inbuf).
*
* The separator is waited for as long as some data can still be received and the
* destination is not full. Otherwise, the string may be returned as is, without
* the separator.
*
* Return values :
* >0 : number of bytes read. Includes the separator if present before len or end.
* =0 : no separator before end found. <str> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getword(const struct appctx *appctx, char *str, int len, char sep)
{
const struct buffer *buf;
char *p;
size_t input, max = len;
int ret = 0;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
goto out;
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
buf = &appctx->inbuf;
input = b_data(buf);
}
else {
struct stconn *sc = appctx_sc(appctx);
buf = sc_ob(sc);
input = co_data(sc_oc(sc));
}
if (max > input) {
max = input;
str[max-1] = 0;
}
p = b_head(buf);
ret = 0;
while (max) {
*str++ = *p;
ret++;
max--;
if (*p == sep)
goto out;
p = b_next(buf, p);
}
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (ret < len && (ret < input || b_room(buf)) &&
!se_fl_test(appctx->sedesc, SE_FL_SHW))
ret = 0;
}
else {
struct stconn *sc = appctx_sc(appctx);
if (ret < len && (ret < input || channel_may_recv(sc_oc(sc))) &&
!(sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
ret = 0;
}
out:
if (max)
*str = 0;
return ret;
}
/* Gets one text block representing a line from the applet input buffer (see
* applet_get_inbuf).
*
* The '\n' is waited for as long as some data can still be received and the
* destination is not full. Otherwise, the string may be returned as is, without
* the '\n'.
*
* Return values :
* >0 : number of bytes read. Includes the \n if present before len or end.
* =0 : no '\n' before end found. <str> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getline(const struct appctx *appctx, char *str, int len)
{
return applet_getword(appctx, str, len, '\n');
}
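/* Illustrative sketch (not part of the diff): consuming one text line from
 * the applet input buffer. The handler name my_applet_read_line() is
 * invented; the calling convention (copy first, then applet_skip_input() to
 * consume) follows the comments above.
 */
static int my_applet_read_line(struct appctx *appctx)
{
	char line[256];
	int ret;

	ret = applet_getline(appctx, line, sizeof(line));
	if (ret <= 0)
		return ret; /* 0: no complete line yet, <0: input is shut */

	/* <ret> bytes were copied, possibly including the trailing '\n'.
	 * They remain in the buffer until explicitly skipped.
	 */
	applet_skip_input(appctx, ret);
	return ret;
}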
/* Gets one or two blocks of data at once from the applet input buffer (see applet_get_inbuf).
*
* Data are not copied.
*
* Return values :
* >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
* =0 : not enough data available. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getblk_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
{
const struct buffer *buf;
size_t max;
int ret;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
return ret;
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
buf = &appctx->inbuf;
max = b_data(buf);
}
else {
struct stconn *sc = appctx_sc(appctx);
buf = sc_ob(sc);
max = co_data(sc_oc(sc));
}
return b_getblk_nc(buf, blk1, len1, blk2, len2, 0, max);
}
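/* Illustrative sketch (not part of the diff): zero-copy inspection of the
 * pending input with applet_getblk_nc(), counting newlines without copying
 * anything. The function name my_applet_count_lines() is invented.
 */
static size_t my_applet_count_lines(struct appctx *appctx)
{
	const char *blk1, *blk2;
	size_t len1, len2, i, nl = 0;
	int ret;

	ret = applet_getblk_nc(appctx, &blk1, &len1, &blk2, &len2);
	if (ret <= 0)
		return 0; /* nothing available, or input is shut */

	for (i = 0; i < len1; i++)
		nl += (blk1[i] == '\n');

	if (ret == 2) {
		/* the data wraps: a second block covers the remaining bytes */
		for (i = 0; i < len2; i++)
			nl += (blk2[i] == '\n');
	}

	/* data was only examined in place; consume it explicitly */
	applet_skip_input(appctx, len1 + (ret == 2 ? len2 : 0));
	return nl;
}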
/* Gets one or two blocks of text representing a word from the applet input
* buffer (see applet_get_inbuf).
*
* Data are not copied. The separator is waited for as long as some data can
* still be received and the destination is not full. Otherwise, the string may
* be returned as is, without the separator.
*
* Return values :
* >0 : number of blocks filled (1 or 2). The separator is included if present before the end.
* =0 : no separator found before the end. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getword_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2, char sep)
{
int ret;
size_t l;
ret = applet_getblk_nc(appctx, blk1, len1, blk2, len2);
if (unlikely(ret <= 0))
return ret;
for (l = 0; l < *len1 && (*blk1)[l] != sep; l++);
if (l < *len1 && (*blk1)[l] == sep) {
*len1 = l + 1;
return 1;
}
if (ret >= 2) {
for (l = 0; l < *len2 && (*blk2)[l] != sep; l++);
if (l < *len2 && (*blk2)[l] == sep) {
*len2 = l + 1;
return 2;
}
}
/* If we have found no separator and the buffer is full or the SC is shut, then
* the resulting string is made of the concatenation of the pending
* blocks (1 or 2).
*/
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
return ret;
}
else {
struct stconn *sc = appctx_sc(appctx);
if (!channel_may_recv(sc_oc(sc)) || sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
return ret;
}
/* No separator yet and not shut yet */
return 0;
}
/* Gets one or two blocks of text representing a line from the applet input
* buffer (see applet_get_inbuf).
*
* Data are not copied. The '\n' is waited for as long as some data can still be
* received and the destination is not full. Otherwise, the string may be
* returned as is, without the '\n'.
*
* Return values :
* >0 : number of blocks filled (1 or 2). The '\n' is included if present before the end.
* =0 : no '\n' found before the end. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getline_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
{
return applet_getword_nc(appctx, blk1, len1, blk2, len2, '\n');
}
#endif /* _HAPROXY_APPLET_H */
/*

View File

@ -86,7 +86,7 @@ static inline int be_usable_srv(struct proxy *be)
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
be->be_counters.last_sess = ns_to_sec(now_ns);
}
/* This function returns non-zero if the designated server will be

View File

@ -68,7 +68,7 @@
#else // not x86
/* generic implementation, causes a segfault */
static inline __attribute((always_inline,noreturn,unused)) void ha_crash_now(void)
static inline __attribute((always_inline)) void ha_crash_now(void)
{
#if __GNUC_PREREQ__(5, 0)
#pragma GCC diagnostic push
@ -620,92 +620,9 @@ struct mem_stats {
_HA_ATOMIC_ADD(&_.size, __y); \
strdup(__x); \
})
#undef ha_aligned_alloc
#define ha_aligned_alloc(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_alloc(__a, __s); \
})
#undef ha_aligned_alloc_safe
#define ha_aligned_alloc_safe(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_alloc_safe(__a, __s); \
})
#undef ha_aligned_free
#define ha_aligned_free(x) ({ \
typeof(x) __x = (x); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_FREE, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
if (__builtin_constant_p((x))) { \
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
} \
if (__x) \
_HA_ATOMIC_INC(&_.calls); \
_ha_aligned_free(__x); \
})
#undef ha_aligned_free_size
#define ha_aligned_free_size(p,s) ({ \
void *__p = (p); size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_FREE, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
if (__builtin_constant_p((p))) { \
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
} \
if (__p) { \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
} \
_ha_aligned_free(__p); \
})
#else // DEBUG_MEM_STATS
#define will_free(x, y) do { } while (0)
#define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
#define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
#define ha_aligned_free(p) _ha_aligned_free(p)
#define ha_aligned_free_size(p,s) _ha_aligned_free(p)
#endif /* DEBUG_MEM_STATS*/

46
include/haproxy/cbuf-t.h Normal file
View File

@ -0,0 +1,46 @@
/*
* include/haproxy/cbuf-t.h
* This file contains definition for circular buffers.
*
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_CBUF_T_H
#define _HAPROXY_CBUF_T_H
#ifdef USE_QUIC
#ifndef USE_OPENSSL
#error "Must define USE_OPENSSL"
#endif
#endif
#include <stddef.h>
#include <haproxy/list-t.h>
extern struct pool_head *pool_head_cbuf;
struct cbuf {
/* buffer */
unsigned char *buf;
/* buffer size */
size_t sz;
/* Writer index */
size_t wr;
/* Reader index */
size_t rd;
};
#endif /* _HAPROXY_CBUF_T_H */

136
include/haproxy/cbuf.h Normal file
View File

@ -0,0 +1,136 @@
/*
* include/haproxy/cbuf.h
* This file contains definitions and prototypes for circular buffers.
* Inspired from Linux circular buffers (include/linux/circ_buf.h).
*
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_CBUF_H
#define _HAPROXY_CBUF_H
#ifdef USE_QUIC
#ifndef USE_OPENSSL
#error "Must define USE_OPENSSL"
#endif
#endif
#include <haproxy/atomic.h>
#include <haproxy/list.h>
#include <haproxy/cbuf-t.h>
struct cbuf *cbuf_new(unsigned char *buf, size_t sz);
void cbuf_free(struct cbuf *cbuf);
/* Amount of data between <rd> and <wr> */
#define CBUF_DATA(wr, rd, size) (((wr) - (rd)) & ((size) - 1))
/* Return the writer position in <cbuf>.
* To be used only by the writer!
*/
static inline unsigned char *cb_wr(struct cbuf *cbuf)
{
return cbuf->buf + cbuf->wr;
}
/* Reset the reader index.
* To be used by a reader!
*/
static inline void cb_rd_reset(struct cbuf *cbuf)
{
cbuf->rd = 0;
}
/* Reset the writer index.
* To be used by a writer!
*/
static inline void cb_wr_reset(struct cbuf *cbuf)
{
cbuf->wr = 0;
}
/* Increase <cbuf> circular buffer data by <count>.
* To be used by a writer!
*/
static inline void cb_add(struct cbuf *cbuf, size_t count)
{
cbuf->wr = (cbuf->wr + count) & (cbuf->sz - 1);
}
/* Return the reader position in <cbuf>.
* To be used only by the reader!
*/
static inline unsigned char *cb_rd(struct cbuf *cbuf)
{
return cbuf->buf + cbuf->rd;
}
/* Skip <count> bytes in <cbuf> circular buffer.
* To be used by a reader!
*/
static inline void cb_del(struct cbuf *cbuf, size_t count)
{
cbuf->rd = (cbuf->rd + count) & (cbuf->sz - 1);
}
/* Return the amount of data left in <cbuf>.
* To be used only by the writer!
*/
static inline size_t cb_data(struct cbuf *cbuf)
{
size_t rd;
rd = HA_ATOMIC_LOAD(&cbuf->rd);
return CBUF_DATA(cbuf->wr, rd, cbuf->sz);
}
/* Return the amount of room left in <cbuf> minus 1 to distinguish
* the case where the buffer is full from the case where it is empty.
* To be used only by the writer!
*/
static inline size_t cb_room(struct cbuf *cbuf)
{
size_t rd;
rd = HA_ATOMIC_LOAD(&cbuf->rd);
return CBUF_DATA(rd, cbuf->wr + 1, cbuf->sz);
}
/* Return the amount of contiguous data left in <cbuf>.
* To be used only by the reader!
*/
static inline size_t cb_contig_data(struct cbuf *cbuf)
{
size_t end, n;
end = cbuf->sz - cbuf->rd;
n = (HA_ATOMIC_LOAD(&cbuf->wr) + end) & (cbuf->sz - 1);
return n < end ? n : end;
}
/* Return the amount of contiguous space left in <cbuf>.
* To be used only by the writer!
*/
static inline size_t cb_contig_space(struct cbuf *cbuf)
{
size_t end, n;
end = cbuf->sz - 1 - cbuf->wr;
n = (HA_ATOMIC_LOAD(&cbuf->rd) + end) & (cbuf->sz - 1);
return n <= end ? n : end + 1;
}
#endif /* _HAPROXY_CBUF_H */
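/* Illustrative, standalone sketch (not part of the diff) of the index
 * arithmetic used above: with a power-of-two size, the distance between the
 * write and read indexes is computed modulo the size, and one slot is kept
 * unused so that a full buffer (room == 0) can be told apart from an empty
 * one (data == 0). The size and index values below are invented.
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_SZ 8 /* must be a power of two, like cbuf->sz */
#define DEMO_DATA(wr, rd) (((wr) - (rd)) & (DEMO_SZ - 1)) /* same shape as CBUF_DATA() */

int main(void)
{
	size_t rd = 6, wr = 1; /* the writer wrapped past the end of the area */

	printf("data = %zu\n", DEMO_DATA(wr, rd));     /* 3: bytes pending at indexes 6, 7 and 0 */
	printf("room = %zu\n", DEMO_DATA(rd, wr + 1)); /* 4: free slots, one slot stays unused */
	return 0;
}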

View File

@ -28,7 +28,7 @@
extern struct timeval start_date; /* the process's start date in wall-clock time */
extern struct timeval ready_date; /* date when the process was considered ready */
extern ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
extern volatile ullong *global_now_ns;/* common monotonic date between all threads, in ns (wraps every 585 yr) */
extern volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */

View File

@ -94,21 +94,11 @@ typedef struct { } empty_t;
# endif
#endif
/* unsafe ones for use with constant macros needed in initializers */
#ifndef _MIN
#define _MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef _MAX
#define _MAX(a, b) ((a > b) ? a : b)
#endif
/* safe versions for use anywhere except in initializers */
#ifndef MIN
#define MIN(a, b) ({ \
typeof(a) _a = (a); \
typeof(a) _b = (b); \
_MIN(_a, _b); \
((_a < _b) ? _a : _b); \
})
#endif
@ -116,15 +106,10 @@ typedef struct { } empty_t;
#define MAX(a, b) ({ \
typeof(a) _a = (a); \
typeof(a) _b = (b); \
_MAX(_a, _b); \
((_a > _b) ? _a : _b); \
})
#endif
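/* Illustrative, standalone sketch (not part of the diff): the statement
 * expression used by MIN()/MAX() above evaluates each argument exactly once,
 * unlike a naive textual macro which expands an argument with a side effect
 * twice. NAIVE_MIN() and the sample values are invented for the demo.
 */
#include <stdio.h>

#define NAIVE_MIN(a, b) ((a < b) ? a : b) /* same shape as the unsafe _MIN() */
#define SAFE_MIN(a, b) ({ \
	typeof(a) _a = (a); \
	typeof(a) _b = (b); \
	((_a < _b) ? _a : _b); \
})

int main(void)
{
	int i = 3, j = 5;

	printf("%d\n", NAIVE_MIN(i++, j)); /* prints 4, and i ends up at 5: i++ expanded twice */
	i = 3;
	printf("%d\n", SAFE_MIN(i++, j));  /* prints 3, and i ends up at 4: i++ evaluated once */
	return 0;
}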
/* always set a _POSIX_VERSION if there isn't any, in order to ease compares */
#ifndef _POSIX_VERSION
# define _POSIX_VERSION 0
#endif
/* this is for libc5 for example */
#ifndef TCP_NODELAY
#define TCP_NODELAY 1

View File

@ -350,7 +350,7 @@
* <type> which has its member <name> stored at address <ptr>.
*/
#ifndef container_of
#define container_of(ptr, type, name) ((type *)(((char *)(ptr)) - offsetof(type, name)))
#define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
#endif
/* returns a pointer to the structure of type <type> which has its member <name>
@ -359,7 +359,7 @@
#ifndef container_of_safe
#define container_of_safe(ptr, type, name) \
({ void *__p = (ptr); \
__p ? (type *)((char *)__p - offsetof(type, name)) : (type *)0; \
__p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
})
#endif
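/* Illustrative, standalone sketch (not part of the diff): recovering the
 * enclosing structure from a pointer to one of its members, which is exactly
 * what container_of() above computes. The struct demo_task type and its
 * fields are invented for the example.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_list { struct demo_list *n, *p; };

struct demo_task {
	int id;
	struct demo_list queue; /* embedded list element */
};

int main(void)
{
	struct demo_task t = { .id = 42 };
	struct demo_list *elem = &t.queue;

	/* same computation as container_of(elem, struct demo_task, queue) */
	struct demo_task *back = (struct demo_task *)((char *)elem - offsetof(struct demo_task, queue));

	printf("%d\n", back->id); /* prints 42 */
	return 0;
}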

View File

@ -68,50 +68,6 @@ struct ssl_sock_ctx;
* conn_cond_update_polling().
*/
/* A bit of explanation is required for backend connection reuse. A connection
* may be shared between multiple streams of the same thread (e.g. h2, fcgi,
* quic) and may be reused by subsequent streams of a different thread if it
* is totally idle (i.e. not used at all). In order to permit other streams
* to find a connection, it has to appear in lists and/or trees that reflect
* its current state. If the connection is full and cannot be shared anymore,
* it is not in any of such places. The various states are the following:
*
* - private: a private connection is not visible to other threads. It is
* attached via its <idle_list> member to the <conn_list> head of a
* sess_priv_conns struct specific to the server, itself attached to the
* session. Only other streams of the same session may find this connection.
* Such connections include totally idle connections as well as connections
* with available slots left. The <hash_node> part is still used to store
* the hash key but the tree node part is otherwise left unused.
*
* - avail: an available connection is a connection that has at least one
* stream in use and at least one slot available for a new stream. Such a
* connection is indexed in the server's <avail_conns> member based on the
* key of the hash_node. It cannot be used by other threads, and is not
* present in the server's <idle_conn_list>, so its <idle_list> member is
* always empty. Since this connection is in use by a single thread and
* cannot be taken over, it doesn't require any locking to enter/leave the
* tree.
*
* - safe: a safe connection is an idle connection that has proven that it
* could reliably be reused. Such a connection may be taken over at any
* instant by other threads, and must only be manipulated under the server's
* <idle_lock>. It is indexed in the server's <safe_conns> member based on
* the key of the hash_node. It is attached to the server's <idle_conn_list>
* via its <idle_list> member. It may be purged after too long inactivity,
* though the thread responsible for doing this will first take it over. Such
* a connection has (conn->flags & CO_FL_LIST_MASK) = CO_FL_SAFE_LIST.
*
* - idle: a purely idle connection has not yet proven that it could reliably
* be reused. Such a connection may be taken over at any instant by other
* threads, and must only be manipulated under the server's <idle_lock>. It
* is indexed in the server's <idle_conns> member based on the key of the
* hash_node. It is attached to the server's <idle_conn_list> via its
* <idle_list> member. It may be purged after too long inactivity, though the
* thread responsible for doing this will first take it over. Such a
* connection has (conn->flags & CO_FL_LIST_MASK) = CO_FL_IDLE_LIST.
*/
/* flags for use in connection->flags. Please also update the conn_show_flags()
* function below in case of changes.
*/
@ -493,6 +449,8 @@ struct mux_ops {
int (*unsubscribe)(struct stconn *sc, int event_type, struct wait_event *es); /* Unsubscribe <es> from events */
int (*sctl)(struct stconn *sc, enum mux_sctl_type mux_sctl, void *arg); /* Provides information about the mux stream */
int (*avail_streams)(struct connection *conn); /* Returns the number of streams still available for a connection */
int (*avail_streams_bidi)(struct connection *conn); /* Returns the number of bidirectional streams still available for a connection */
int (*avail_streams_uni)(struct connection *conn); /* Returns the number of unidirectional streams still available for a connection */
int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */
void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */
int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */

View File

@ -25,144 +25,108 @@
#include <haproxy/freq_ctr-t.h>
#define COUNTERS_SHARED_F_NONE 0x0000
#define COUNTERS_SHARED_F_LOCAL 0x0001 // shared counter struct is actually process-local
// common to fe_counters_shared and be_counters_shared
#define COUNTERS_SHARED \
struct { \
uint16_t flags; /* COUNTERS_SHARED_F flags */\
};
#define COUNTERS_SHARED_TG \
struct { \
unsigned long last_state_change; /* last time, when the state was changed */\
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
long long internal_errors; /* internal processing errors */\
long long failed_rewrites; /* failed rewrites (warning) */\
long long bytes_out; /* number of bytes transferred from the server to the client */\
long long bytes_in; /* number of bytes transferred from the client to the server */\
long long denied_resp; /* blocked responses because of security concerns */\
long long denied_req; /* blocked requests because of security concerns */\
long long cum_sess; /* cumulated number of accepted connections */\
/* compression counters, index 0 for requests, 1 for responses */\
long long comp_in[2]; /* input bytes fed to the compressor */\
long long comp_out[2]; /* output bytes emitted by the compressor */\
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */\
struct freq_ctr sess_per_sec; /* sessions per second on this server */\
}
// for convenience (generic pointer)
struct counters_shared {
COUNTERS_SHARED;
struct {
COUNTERS_SHARED_TG;
} *tg[MAX_TGROUPS];
};
/* counters used by listeners and frontends */
struct fe_counters_shared_tg {
COUNTERS_SHARED_TG;
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
long long cum_conn; /* cumulated number of received connections */
struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
union {
struct {
long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
long long cache_hits; /* cache hits */
long long cache_lookups;/* cache lookups */
long long comp_rsp; /* number of compressed responses */
long long rsp[6]; /* http response codes */
} http;
} p; /* protocol-specific stats */
long long failed_req; /* failed requests (eg: invalid or timeout) */
};
struct fe_counters_shared {
COUNTERS_SHARED;
struct fe_counters_shared_tg *tg[MAX_TGROUPS];
};
struct fe_counters {
struct fe_counters_shared shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */
long long cum_conn; /* cumulated number of received connections */
long long cum_sess; /* cumulated number of accepted connections */
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
unsigned int cps_max; /* maximum of new connections received per second */
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */
struct freq_ctr _conn_per_sec; /* connections per second on this frontend, used to compute cps_max (internal use only) */
long long bytes_in; /* number of bytes transferred from the client to the server */
long long bytes_out; /* number of bytes transferred from the server to the client */
/* compression counters, index 0 for requests, 1 for responses */
long long comp_in[2]; /* input bytes fed to the compressor */
long long comp_out[2]; /* output bytes emitted by the compressor */
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
long long denied_req; /* blocked requests because of security concerns */
long long denied_resp; /* blocked responses because of security concerns */
long long failed_req; /* failed requests (eg: invalid or timeout) */
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
long long failed_rewrites; /* failed rewrites (warning) */
long long internal_errors; /* internal processing errors */
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
union {
struct {
unsigned int rps_max; /* maximum of new HTTP requests per second observed */
struct freq_ctr _req_per_sec; /* HTTP requests per second on the frontend, only used to compute rps_max */
} http;
} p; /* protocol-specific stats */
};
struct be_counters_shared_tg {
COUNTERS_SHARED_TG;
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
long long connect; /* number of connection establishment attempts */
long long reuse; /* number of connection reuses */
unsigned long last_sess; /* last session time */
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
long long down_trans; /* up->down transitions */
union {
struct {
long long cum_req; /* cumulated number of processed HTTP requests */
long long cache_hits; /* cache hits */
long long cache_lookups;/* cache lookups */
long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
long long comp_rsp; /* number of compressed responses */
unsigned int rps_max; /* maximum of new HTTP requests per second observed */
long long rsp[6]; /* http response codes */
long long cache_lookups;/* cache lookups */
long long cache_hits; /* cache hits */
} http;
} p; /* protocol-specific stats */
long long redispatches; /* retried and redispatched connections (BE only) */
long long retries; /* retried and redispatched connections (BE only) */
long long failed_resp; /* failed responses (BE only) */
long long failed_conns; /* failed connect() attempts (BE only) */
};
struct freq_ctr sess_per_sec; /* sessions per second on this server */
struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
struct be_counters_shared {
COUNTERS_SHARED;
struct be_counters_shared_tg *tg[MAX_TGROUPS];
unsigned long last_change; /* last time, when the state was changed */
};
/* counters used by servers and backends */
struct be_counters {
struct be_counters_shared shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */
long long cum_sess; /* cumulated number of accepted connections */
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
unsigned int cps_max; /* maximum of new connections received per second */
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
unsigned int nbpend_max; /* max number of pending connections with no server assigned yet */
unsigned int cur_sess_max; /* max number of currently active sessions */
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */
long long bytes_in; /* number of bytes transferred from the client to the server */
long long bytes_out; /* number of bytes transferred from the server to the client */
/* compression counters, index 0 for requests, 1 for responses */
long long comp_in[2]; /* input bytes fed to the compressor */
long long comp_out[2]; /* output bytes emitted by the compressor */
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
long long denied_req; /* blocked requests because of security concerns */
long long denied_resp; /* blocked responses because of security concerns */
long long connect; /* number of connection establishment attempts */
long long reuse; /* number of connection reuses */
long long failed_conns; /* failed connect() attempts (BE only) */
long long failed_resp; /* failed responses (BE only) */
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
long long retries; /* retried and redispatched connections (BE only) */
long long redispatches; /* retried and redispatched connections (BE only) */
long long failed_rewrites; /* failed rewrites (warning) */
long long internal_errors; /* internal processing errors */
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
long long down_trans; /* up->down transitions */
unsigned int q_time, c_time, d_time, t_time; /* sums of conn_time, queue_time, data_time, total_time */
unsigned int qtime_max, ctime_max, dtime_max, ttime_max; /* maximum of conn_time, queue_time, data_time, total_time observed */
union {
struct {
long long cum_req; /* cumulated number of processed HTTP requests */
long long comp_rsp; /* number of compressed responses */
unsigned int rps_max; /* maximum of new HTTP requests per second observed */
long long rsp[6]; /* http response codes */
long long cache_lookups;/* cache lookups */
long long cache_hits; /* cache hits */
} http;
} p; /* protocol-specific stats */
struct freq_ctr sess_per_sec; /* sessions per second on this server */
unsigned long last_sess; /* last session time */
unsigned long last_change; /* last time, when the state was changed */
};
#endif /* _HAPROXY_COUNTERS_T_H */

View File

@ -1,102 +0,0 @@
/*
* include/haproxy/counters.h
* objects counters management
*
* Copyright 2025 HAProxy Technologies
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_COUNTERS_H
# define _HAPROXY_COUNTERS_H
#include <stddef.h>
#include <haproxy/counters-t.h>
#include <haproxy/guid-t.h>
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid);
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid);
void counters_fe_shared_drop(struct fe_counters_shared *counters);
void counters_be_shared_drop(struct be_counters_shared *counters);
/* time oriented helper: get last time (relative to current time) on a given
* <scounter> array, for <elem> member (one member per thread group) which is
* assumed to be unsigned long type.
*
* wrapping is handled by taking the lowest diff between now and last counter.
* But since wrapping is expected once every ~136 years (starting 01/01/1970),
* perhaps it's not worth the extra CPU cost.. let's see.
*/
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
({ \
unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
unsigned long now_seconds = ns_to_sec(now_ns); \
int it; \
\
for (it = 1; it < global.nbtgroups; it++) { \
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
if ((now_seconds - cur) < (now_seconds - last)) \
last = cur; \
} \
last; \
})
#define COUNTERS_SHARED_LAST(scounters, elem) \
({ \
int offset = offsetof(typeof(**scounters), elem); \
unsigned long last = COUNTERS_SHARED_LAST_OFFSET(scounters, typeof(scounters[0]->elem), offset); \
\
last; \
})
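/* Illustrative, standalone sketch (not part of the diff) of the selection
 * rule used by COUNTERS_SHARED_LAST_OFFSET() above: among per-group "last
 * event" timestamps, keep the one with the smallest (now - last) distance,
 * which is the most recent one even if the counter wrapped. The sample
 * timestamps and group count are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned long now_seconds = 1000;
	unsigned long last_per_group[] = { 600, 950, 700 }; /* one entry per thread group */
	int nbgroups = 3, it;
	unsigned long last = last_per_group[0];

	for (it = 1; it < nbgroups; it++) {
		unsigned long cur = last_per_group[it];

		if ((now_seconds - cur) < (now_seconds - last))
			last = cur;
	}
	printf("most recent: %lu\n", last); /* prints 950 */
	return 0;
}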
/* generic unsigned integer addition for all <elem> members from
* <scounters> array (one member per thread group)
* <rfunc> is a function taking a pointer as parameter to read from the memory
* location pointed to by scounters[it].elem
*/
#define COUNTERS_SHARED_TOTAL_OFFSET(scounters, type, offset, rfunc) \
({ \
uint64_t __ret = 0; \
int it; \
\
for (it = 0; it < global.nbtgroups; it++) \
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
__ret; \
})
#define COUNTERS_SHARED_TOTAL(scounters, elem, rfunc) \
({ \
int offset = offsetof(typeof(**scounters), elem); \
uint64_t __ret = COUNTERS_SHARED_TOTAL_OFFSET(scounters, typeof(scounters[0]->elem), offset, rfunc);\
\
__ret; \
})
/* same as COUNTERS_SHARED_TOTAL but with <rfunc> taking 2 extras arguments:
* <arg1> and <arg2>
*/
#define COUNTERS_SHARED_TOTAL_ARG2(scounters, elem, rfunc, arg1, arg2) \
({ \
uint64_t __ret = 0; \
int it; \
\
for (it = 0; it < global.nbtgroups; it++) \
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
__ret; \
})
#endif /* _HAPROXY_COUNTERS_H */

View File

@ -2,7 +2,6 @@
#define _HAPROXY_CPU_TOPO_H
#include <haproxy/api.h>
#include <haproxy/chunk.h>
#include <haproxy/cpuset-t.h>
#include <haproxy/cpu_topo-t.h>
@ -56,12 +55,7 @@ int cpu_map_configured(void);
/* Dump the CPU topology <topo> for up to cpu_topo_maxcpus CPUs for
* debugging purposes. Offline CPUs are skipped.
*/
void cpu_topo_debug(const struct ha_cpu_topo *topo);
/* Dump the summary of CPU topology <topo>, i.e. clusters info and thread-cpu
* bindings.
*/
void cpu_topo_dump_summary(const struct ha_cpu_topo *topo, struct buffer *trash);
void cpu_dump_topology(const struct ha_cpu_topo *topo);
/* re-order a CPU topology array by locality to help form groups. */
void cpu_reorder_by_locality(struct ha_cpu_topo *topo, int entries);

View File

@ -44,7 +44,7 @@
* doesn't engage us too far.
*/
#ifndef MAX_TGROUPS
#define MAX_TGROUPS 32
#define MAX_TGROUPS 16
#endif
#define MAX_THREADS_PER_GROUP __WORDSIZE
@ -53,7 +53,7 @@
* long bits if more tgroups are enabled.
*/
#ifndef MAX_THREADS
#define MAX_THREADS ((((MAX_TGROUPS) > 1) ? 16 : 1) * (MAX_THREADS_PER_GROUP))
#define MAX_THREADS ((((MAX_TGROUPS) > 1) ? 4 : 1) * (MAX_THREADS_PER_GROUP))
#endif
#endif // USE_THREAD
@ -115,10 +115,6 @@
// via standard input.
#define MAX_CFG_SIZE 10485760
// may be handy for some system config files, where we just need to find
// some specific values (read with fgets)
#define MAX_LINES_TO_READ 32
// max # args on a configuration line
#define MAX_LINE_ARGS 64
@ -353,11 +349,6 @@
#define SRV_CHK_INTER_THRES 1000
#endif
/* INET6 connectivity caching interval (in ms) */
#ifndef INET6_CONNECTIVITY_CACHE_TIME
#define INET6_CONNECTIVITY_CACHE_TIME 30000
#endif
/* Specifies the string used to report the version and release date on the
* statistics page. May be defined to the empty string ("") to permanently
* disable the feature.

View File

@ -499,7 +499,6 @@ static inline long fd_clr_running(int fd)
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid, unsigned long thread_mask)
{
extern void sock_conn_iocb(int);
struct tgroup_info *tginfo = &ha_tgroup_info[tgid - 1];
int newstate;
/* conn_fd_handler should support edge-triggered FDs */
@ -529,7 +528,7 @@ static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid
BUG_ON(fdtab[fd].state != 0);
BUG_ON(tgid < 1 || tgid > MAX_TGROUPS);
thread_mask &= tginfo->threads_enabled;
thread_mask &= tg->threads_enabled;
BUG_ON(thread_mask == 0);
fd_claim_tgid(fd, tgid);

View File

@ -197,7 +197,6 @@ struct global {
int pattern_cache; /* max number of entries in the pattern cache. */
int sslcachesize; /* SSL cache size in session, defaults to 20000 */
int comp_maxlevel; /* max HTTP compression level */
uint glitch_kill_maxidle; /* have glitches kill only below this level of idle */
int pool_low_ratio; /* max ratio of FDs used before we stop using new idle connections */
int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new connections */
int pool_low_count; /* max number of opened fd before we stop using new idle connections */

View File

@ -65,7 +65,6 @@ int h1_format_htx_reqline(const struct htx_sl *sl, struct buffer *chk);
int h1_format_htx_stline(const struct htx_sl *sl, struct buffer *chk);
int h1_format_htx_hdr(const struct ist n, const struct ist v, struct buffer *chk);
int h1_format_htx_data(const struct ist data, struct buffer *chk, int chunked);
int h1_format_htx_msg(const struct htx *htx, struct buffer *outbuf);
#endif /* _HAPROXY_H1_HTX_H */

View File

@ -72,8 +72,8 @@ struct stream;
#define HLUA_NOYIELD 0x00000020
#define HLUA_BUSY 0x00000040
#define HLUA_F_AS_STRING 0x01
#define HLUA_F_MAY_USE_CHANNELS_DATA 0x02
#define HLUA_F_AS_STRING 0x01
#define HLUA_F_MAY_USE_HTTP 0x02
/* HLUA TXN flags */
#define HLUA_TXN_NOTERM 0x00000001

View File

@ -232,52 +232,6 @@ static inline int http_path_has_forbidden_char(const struct ist ist, const char
return 0;
}
/* Checks whether the :authority pseudo header contains dangerous chars that
* might affect its reassembly. We want to catch anything below 0x21, above
* 0x7e, as well as '@', '[', ']', '/','?', '#', '\', CR, LF, NUL. Then we
* fall back to the slow path and decide. Brackets are used for IP-literal and
* deserve a special case that is better handled in the slow path. The function
* returns 0 if no forbidden char is present, non-zero otherwise.
*/
static inline int http_authority_has_forbidden_char(const struct ist ist)
{
size_t ofs, len = istlen(ist);
const char *p = istptr(ist);
int brackets = 0;
uchar c;
/* Many attempts with various methods have shown that moderately recent
* compilers (gcc >= 9, clang >= 13) will arrange the code below as an
* evaluation tree that remains efficient at -O2 and above (~1.2ns per
* char). The immediate next efficient one is the bitmap from 64-bit
* registers but it's extremely sensitive to code arrangements and
* optimization.
*/
for (ofs = 0; ofs < len; ofs++) {
c = p[ofs];
if (unlikely(c < 0x21 || c > 0x7e ||
c == '#' || c == '/' || c == '?' || c == '@' ||
c == '[' || c == '\\' || c == ']')) {
/* all of them must be rejected, except '[' which may
* only appear at the beginning, and ']' which may
* only appear at the end or before a colon.
*/
if ((c == '[' && ofs == 0) ||
(c == ']' && (ofs == len - 1 || p[ofs + 1] == ':'))) {
/* that's an IP-literal (see RFC3986#3.2), it's
* OK for now.
*/
brackets ^= 1;
} else {
return 1;
}
}
}
/* there must be no opening bracket left nor lone closing one */
return brackets;
}
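/* Illustrative sketch (not part of the diff): sample verdicts of the helper
 * above on a few invented :authority values. It assumes the ist() constructor
 * from the ist API and <stdio.h> for the output.
 */
static void demo_authority_checks(void)
{
	printf("%d\n", http_authority_has_forbidden_char(ist("example.com:8443")));  /* 0: accepted */
	printf("%d\n", http_authority_has_forbidden_char(ist("[2001:db8::1]:443"))); /* 0: well-formed IP-literal */
	printf("%d\n", !!http_authority_has_forbidden_char(ist("a@b.example")));     /* 1: '@' is rejected */
	printf("%d\n", !!http_authority_has_forbidden_char(ist("[::1")));            /* 1: unbalanced bracket */
}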
/* Checks status code array <array> for the presence of status code <status>.
* Returns non-zero if the code is present, zero otherwise. Any status code is
* permitted.

View File

@ -32,7 +32,6 @@ struct httpclient {
int timeout_server; /* server timeout in ms */
void *caller; /* ptr of the caller */
unsigned int flags; /* other flags */
unsigned int options; /* options */
struct proxy *px; /* proxy for special cases */
struct server *srv_raw; /* server for clear connections */
#ifdef USE_OPENSSL
@ -43,16 +42,11 @@ struct httpclient {
/* Action (FA) to do */
#define HTTPCLIENT_FA_STOP 0x00000001 /* stops the httpclient at the next IO handler call */
#define HTTPCLIENT_FA_AUTOKILL 0x00000002 /* sets the applet to destroy the httpclient struct itself */
#define HTTPCLIENT_FA_DRAIN_REQ 0x00000004 /* drains the request */
/* status (FS) */
#define HTTPCLIENT_FS_STARTED 0x00010000 /* the httpclient was started */
#define HTTPCLIENT_FS_ENDED 0x00020000 /* the httpclient is stopped */
/* options */
#define HTTPCLIENT_O_HTTPPROXY 0x00000001 /* the request must be use an absolute URI */
#define HTTPCLIENT_O_RES_HTX 0x00000002 /* response is stored in HTX */
/* States of the HTTP Client Appctx */
enum {
HTTPCLIENT_S_REQ = 0,
@ -65,4 +59,12 @@ enum {
#define HTTPCLIENT_USERAGENT "HAProxy"
/* What kind of data we need to read */
#define HC_F_RES_STLINE 0x01
#define HC_F_RES_HDR 0x02
#define HC_F_RES_BODY 0x04
#define HC_F_RES_END 0x08
#define HC_F_HTTPPROXY 0x10
#endif /* ! _HAPROXY_HTTCLIENT__T_H */

View File

@ -64,17 +64,8 @@ enum jwt_elt {
JWT_ELT_MAX
};
enum jwt_entry_type {
JWT_ENTRY_DFLT,
JWT_ENTRY_STORE,
JWT_ENTRY_PKEY,
JWT_ENTRY_INVALID, /* already tried looking into ckch_store tree (unsuccessful) */
};
struct jwt_cert_tree_entry {
EVP_PKEY *pubkey;
struct ckch_store *ckch_store;
int type; /* jwt_entry_type */
EVP_PKEY *pkey;
struct ebmb_node node;
char path[VAR_ARRAY];
};
@ -87,8 +78,7 @@ enum jwt_vrfy_status {
JWT_VRFY_UNMANAGED_ALG = -2,
JWT_VRFY_INVALID_TOKEN = -3,
JWT_VRFY_OUT_OF_MEMORY = -4,
JWT_VRFY_UNKNOWN_CERT = -5,
JWT_VRFY_INTERNAL_ERR = -6
JWT_VRFY_UNKNOWN_CERT = -5
};
#endif /* USE_OPENSSL */

View File

@ -28,13 +28,10 @@
#ifdef USE_OPENSSL
enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len);
int jwt_tokenize(const struct buffer *jwt, struct jwt_item *items, unsigned int *item_num);
int jwt_tree_load_cert(char *path, int pathlen, const char *file, int line, char **err);
int jwt_tree_load_cert(char *path, int pathlen, char **err);
enum jwt_vrfy_status jwt_verify(const struct buffer *token, const struct buffer *alg,
const struct buffer *key);
void jwt_replace_ckch_store(struct ckch_store *old_ckchs, struct ckch_store *new_ckchs);
#endif /* USE_OPENSSL */
#endif /* _HAPROXY_JWT_H */

View File

@ -97,7 +97,7 @@
* since it's used only once.
* Example: LIST_ELEM(cur_node->args.next, struct node *, args)
*/
#define LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - offsetof(typeof(*(pt)NULL), el)))
#define LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))
/* checks if the list head <lh> is empty or not */
#define LIST_ISEMPTY(lh) ((lh)->n == (lh))
@ -284,11 +284,10 @@ static __inline void watcher_attach(struct watcher *w, void *target)
MT_LIST_APPEND(list, &w->el);
}
/* Untracks target via <w> watcher. Does nothing if <w> is not attached */
/* Untracks target via <w> watcher. Invalid if <w> is not attached first. */
static __inline void watcher_detach(struct watcher *w)
{
if (!MT_LIST_INLIST(&w->el))
return;
BUG_ON_HOT(!MT_LIST_INLIST(&w->el));
*w->pptr = NULL;
MT_LIST_DELETE(&w->el);
}

View File

@ -121,7 +121,6 @@ enum li_status {
#define BC_SSL_O_NONE 0x0000
#define BC_SSL_O_NO_TLS_TICKETS 0x0100 /* disable session resumption tickets */
#define BC_SSL_O_PREF_CLIE_CIPH 0x0200 /* prefer client ciphers */
#define BC_SSL_O_STRICT_SNI 0x0400 /* refuse negotiation if sni doesn't match a certificate */
#endif
struct tls_version_filter {
@ -170,6 +169,7 @@ struct bind_conf {
unsigned long long ca_ignerr_bitfield[IGNERR_BF_SIZE]; /* ignored verify errors in handshake if depth > 0 */
unsigned long long crt_ignerr_bitfield[IGNERR_BF_SIZE]; /* ignored verify errors in handshake if depth == 0 */
void *initial_ctx; /* SSL context for initial negotiation */
int strict_sni; /* refuse negotiation if sni doesn't match a certificate */
int ssl_options; /* ssl options */
struct eb_root sni_ctx; /* sni_ctx tree of all known certs full-names sorted by name */
struct eb_root sni_w_ctx; /* sni_ctx tree of all known certs wildcards sorted by name */
@ -193,7 +193,6 @@ struct bind_conf {
unsigned int analysers; /* bitmap of required protocol analysers */
int maxseg; /* for TCP, advertised MSS */
int tcp_ut; /* for TCP, user timeout */
char *tcp_md5sig; /* TCP MD5 signature password (RFC2385) */
int idle_ping; /* MUX idle-ping interval in ms */
int maxaccept; /* if set, max number of connections accepted at once (-1 when disabled) */
unsigned int backlog; /* if set, listen backlog */
@ -245,7 +244,7 @@ struct listener {
struct fe_counters *counters; /* statistics counters */
struct mt_list wait_queue; /* link element to make the listener wait for something (LI_LIMITED) */
char *name; /* listener's name */
char *label; /* listener's label */
unsigned int thr_conn[MAX_THREADS_PER_GROUP]; /* number of connections per thread for the group */
struct list by_fe; /* chaining in frontend's list of listeners */

View File

@ -71,5 +71,20 @@ struct mailers {
} timeout;
};
struct email_alert {
struct list list;
struct tcpcheck_rules rules;
struct server *srv;
};
struct email_alertq {
struct list email_alerts;
struct check check; /* Email alerts are implemented using existing check
* code even though they are not checks. This structure
* is used as a parameter to the check code.
* Each check corresponds to a mailer */
__decl_thread(HA_SPINLOCK_T lock);
};
#endif /* _HAPROXY_MAILERS_T_H */

View File

@ -31,10 +31,13 @@
#include <haproxy/proxy-t.h>
#include <haproxy/server-t.h>
extern int mailers_used_from_lua;
extern struct mailers *mailers;
extern int send_email_disabled;
int init_email_alert(struct mailers *mailers, struct proxy *p, char **err);
void free_email_alert(struct proxy *p);
void send_email_alert(struct server *s, int priority, const char *format, ...)
__attribute__ ((format(printf, 3, 4)));
#endif /* _HAPROXY_MAILERS_H */

View File

@ -17,8 +17,6 @@
#include <haproxy/quic_frame-t.h>
#include <haproxy/quic_pacing-t.h>
#include <haproxy/quic_stream-t.h>
#include <haproxy/quic_utils-t.h>
#include <haproxy/session-t.h>
#include <haproxy/stconn-t.h>
#include <haproxy/task-t.h>
#include <haproxy/time-t.h>
@ -29,6 +27,9 @@ enum qcs_type {
QCS_SRV_BIDI,
QCS_CLT_UNI,
QCS_SRV_UNI,
/* Must be the last one */
QCS_MAX_TYPES
};
enum qcc_app_st {
@ -67,9 +68,6 @@ struct qcc {
/* flow-control fields set by the peer which we must respect. */
struct {
uint64_t ms_uni; /* max sub-ID of uni stream allowed by the peer */
uint64_t ms_bidi; /* max sub-ID of bidi stream allowed by the peer */
uint64_t md; /* connection flow control limit updated on MAX_DATA frames reception */
uint64_t msd_bidi_l; /* initial max-stream-data from peer on local bidi streams */
uint64_t msd_bidi_r; /* initial max-stream-data from peer on remote bidi streams */
@ -147,7 +145,6 @@ struct qc_stream_rxbuf {
struct qcs {
struct qcc *qcc;
struct session *sess; /* only set for backend conns */
struct sedesc *sd;
uint32_t flags; /* QC_SF_* */
enum qcs_state st; /* QC_SS_* state */
@ -160,7 +157,6 @@ struct qcs {
struct buffer app_buf; /* receive buffer used by stconn layer */
uint64_t msd; /* current max-stream-data limit to enforce */
uint64_t msd_base; /* max-stream-data previous to latest update */
struct bdata_ctr data; /* data utilization counter. Note that <tot> is not used for now as accounting may be difficult with ncbuf. */
} rx;
struct {
struct quic_fctl fc; /* stream flow control applied on sending */
@ -207,11 +203,11 @@ struct qcc_app_ops {
/* Initialize <qcs> stream app context or leave it to NULL if rejected. */
int (*attach)(struct qcs *qcs, void *conn_ctx);
/* Convert received HTTP payload to HTX. Returns amount of decoded bytes from <b> or a negative error code. */
/* Convert received HTTP payload to HTX. */
ssize_t (*rcv_buf)(struct qcs *qcs, struct buffer *b, int fin);
/* Convert HTX to HTTP payload for sending. */
size_t (*snd_buf)(struct qcs *qcs, struct buffer *b, size_t count, char *fin);
size_t (*snd_buf)(struct qcs *qcs, struct buffer *b, size_t count);
/* Negotiate and commit fast-forward data from opposite MUX. */
size_t (*nego_ff)(struct qcs *qcs, size_t count);
@ -237,7 +233,7 @@ struct qcc_app_ops {
#define QC_CF_ERRL 0x00000001 /* fatal error detected locally, connection should be closed soon */
#define QC_CF_ERRL_DONE 0x00000002 /* local error properly handled, connection can be released */
#define QC_CF_IS_BACK 0x00000004 /* backend side */
/* unused 0x00000004 */
#define QC_CF_CONN_FULL 0x00000008 /* no stream buffers available on connection */
/* unused 0x00000010 */
#define QC_CF_ERR_CONN 0x00000020 /* fatal error reported by transport layer */
@ -277,7 +273,6 @@ static forceinline char *qcc_show_flags(char *buf, size_t len, const char *delim
#define QC_SF_TO_STOP_SENDING 0x00000200 /* a STOP_SENDING must be sent */
#define QC_SF_UNKNOWN_PL_LENGTH 0x00000400 /* HTX EOM may be missing from the stream layer */
#define QC_SF_RECV_RESET 0x00000800 /* a RESET_STREAM was received */
#define QC_SF_EOI_SUSPENDED 0x00001000 /* EOI must not be reported even if HTX EOM is present - useful when transferring HTTP interim responses */
/* This function is used to report flags in debugging tools. Please reflect
* below any single-bit flag addition above in the same order via the

View File

@ -19,7 +19,6 @@
void qcc_set_error(struct qcc *qcc, int err, int app);
int _qcc_report_glitch(struct qcc *qcc, int inc);
int qcc_fctl_avail_streams(const struct qcc *qcc, int bidi);
struct qcs *qcc_init_stream_local(struct qcc *qcc, int bidi);
void qcs_send_metadata(struct qcs *qcs);
int qcs_attach_sc(struct qcs *qcs, struct buffer *buf, char fin);
@ -44,7 +43,6 @@ int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
char fin, char *data);
int qcc_recv_max_data(struct qcc *qcc, uint64_t max);
int qcc_recv_max_stream_data(struct qcc *qcc, uint64_t id, uint64_t max);
int qcc_recv_max_streams(struct qcc *qcc, uint64_t max, int bidi);
int qcc_recv_reset_stream(struct qcc *qcc, uint64_t id, uint64_t err, uint64_t final_size);
int qcc_recv_stop_sending(struct qcc *qcc, uint64_t id, uint64_t err);
@ -53,7 +51,15 @@ static inline int qmux_stream_rx_bufsz(void)
return global.tune.bufsize - NCB_RESERVED_SZ;
}
/* Bit shift to get the stream sub ID for internal use, which is obtained by
* shifting the stream IDs by this value, knowing that the
* QCS_ID_TYPE_SHIFT least significant bits identify the stream ID
* types (client initiated bidirectional, server initiated bidirectional,
* client initiated unidirectional, server initiated unidirectional).
* Note that there is no reference to such stream sub IDs in the RFC.
*/
#define QCS_ID_TYPE_MASK 0x3
#define QCS_ID_TYPE_SHIFT 2
/* The least significant bit of a stream ID is set for a server initiated stream */
#define QCS_ID_SRV_INTIATOR_BIT 0x1
/* This bit is set for unidirectional streams */
@ -76,6 +82,16 @@ static inline int quic_stream_is_remote(struct qcc *qcc, uint64_t id)
return !quic_stream_is_local(qcc, id);
}
static inline int quic_stream_is_uni(uint64_t id)
{
return id & QCS_ID_DIR_BIT;
}
static inline int quic_stream_is_bidi(uint64_t id)
{
return !quic_stream_is_uni(id);
}
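/* Illustrative, standalone sketch (not part of the diff): decomposing a QUIC
 * stream ID with the constants defined above. The DEMO_ macros mirror
 * QCS_ID_TYPE_MASK and QCS_ID_TYPE_SHIFT, and the sample ID is invented.
 */
#include <stdio.h>

#define DEMO_QCS_ID_TYPE_MASK  0x3
#define DEMO_QCS_ID_TYPE_SHIFT 2

int main(void)
{
	unsigned long long id = 0x8; /* third client-initiated bidirectional stream */

	printf("type bits: %llu\n", id & DEMO_QCS_ID_TYPE_MASK);   /* 0: client-initiated bidirectional */
	printf("sub ID   : %llu\n", id >> DEMO_QCS_ID_TYPE_SHIFT); /* 2: third stream of that type (sub IDs 0, 1, 2) */
	return 0;
}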
static inline char *qcs_st_to_str(enum qcs_state st)
{
switch (st) {

View File

@ -87,9 +87,10 @@ static forceinline char *spop_strm_show_flags(char *buf, size_t len, const char
/* SPOP connection state (spop_conn->state) */
enum spop_conn_st {
SPOP_CS_HA_HELLO = 0, /* init done, waiting for sending HELLO frame */
SPOP_CS_AGENT_HELLO, /* HELLO frame sent, waiting for agent HELLO frame to define the connection settings */
SPOP_CS_RUNNING, /* HELLO handshake finished, exchange NOTIFY/ACK frames */
SPOP_CS_HA_HELLO = 0, /* init done, waiting for sending HELLO frame */
SPOP_CS_AGENT_HELLO, /* HELLO frame sent, waiting for agent HELLO frame to define the connection settings */
SPOP_CS_FRAME_H, /* HELLO handshake finished, waiting for a frame header */
SPOP_CS_FRAME_P, /* Frame header received, waiting for a frame data */
SPOP_CS_ERROR, /* send DISCONNECT frame to be able to close the connection */
SPOP_CS_CLOSING, /* DISCONNECT frame sent, waiting for the agent DISCONNECT frame before closing */
SPOP_CS_CLOSED, /* Agent DISCONNECT frame received and close the connection ASAP */
@ -102,7 +103,8 @@ static inline const char *spop_conn_st_to_str(enum spop_conn_st st)
switch (st) {
case SPOP_CS_HA_HELLO : return "HHL";
case SPOP_CS_AGENT_HELLO: return "AHL";
case SPOP_CS_RUNNING : return "RUN";
case SPOP_CS_FRAME_H : return "FRH";
case SPOP_CS_FRAME_P : return "FRP";
case SPOP_CS_ERROR : return "ERR";
case SPOP_CS_CLOSING : return "CLI";
case SPOP_CS_CLOSED : return "CLO";

View File

@ -21,7 +21,7 @@
#define PROC_O_TYPE_MASTER 0x00000001
#define PROC_O_TYPE_WORKER 0x00000002
/* 0x00000004 unused */
#define PROC_O_TYPE_PROG 0x00000004
/* 0x00000008 unused */
#define PROC_O_LEAVING 0x00000010 /* this process should be leaving */
/* state of the newly forked worker process, which hasn't sent yet its READY message to master */

View File

@ -42,6 +42,8 @@ void mworker_cleanlisteners(void);
int mworker_child_nb(void);
int mworker_ext_launch_all(void);
void mworker_kill_max_reloads(int sig);
struct mworker_proc *mworker_proc_new();

View File

@ -46,35 +46,8 @@
#ifdef USE_QUIC_OPENSSL_COMPAT
#include <haproxy/quic_openssl_compat.h>
#else
#define HAVE_OPENSSL_QUIC_CLIENT_SUPPORT
#if defined(OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_SEND)
/* This macro is defined by the new OpenSSL 3.5.0 QUIC TLS API and it is not
* defined by quictls.
*/
#if defined(USE_QUIC) && (OPENSSL_VERSION_NUMBER < 0x30500010L)
#error "OpenSSL 3.5 QUIC API should only be used with OpenSSL 3.5.1 version and newer"
#endif
#define HAVE_OPENSSL_QUIC
#define SSL_set_quic_transport_params SSL_set_quic_tls_transport_params
#define SSL_set_quic_early_data_enabled SSL_set_quic_tls_early_data_enabled
#define SSL_quic_read_level(arg) -1
enum ssl_encryption_level_t {
ssl_encryption_initial = 0,
ssl_encryption_early_data,
ssl_encryption_handshake,
ssl_encryption_application
};
#else
/* QUIC TLS API */
#define HAVE_OPENSSL_QUICTLS
#endif
#endif /* USE_QUIC_OPENSSL_COMPAT */
#if defined(OPENSSL_IS_AWSLC)
#define OPENSSL_NO_DH
#define SSL_CTX_set1_sigalgs_list SSL_CTX_set1_sigalgs_list

View File

@ -33,9 +33,6 @@
extern const char *const pat_match_names[PAT_MATCH_NUM];
extern int const pat_match_types[PAT_MATCH_NUM];
extern unsigned long long patterns_added;
extern unsigned long long patterns_freed;
extern int (*const pat_parse_fcts[PAT_MATCH_NUM])(const char *, struct pattern *, int, char **);
extern int (*const pat_index_fcts[PAT_MATCH_NUM])(struct pattern_expr *, struct pattern *, char **);
extern void (*const pat_prune_fcts[PAT_MATCH_NUM])(struct pattern_expr *);

View File

@ -40,5 +40,10 @@ int peers_register_table(struct peers *, struct stktable *table);
void peers_setup_frontend(struct proxy *fe);
void peers_register_keywords(struct peers_kw_list *pkwl);
static inline enum obj_type *peer_session_target(struct peer *p, struct stream *s)
{
return &p->srv->obj_type;
}
#endif /* _HAPROXY_PEERS_H */

View File

@ -25,7 +25,6 @@
#include <sys/mman.h>
#include <stdlib.h>
#include <haproxy/api.h>
#include <haproxy/tools.h>
/************* normal allocator *************/
@ -33,9 +32,9 @@
/* allocates an area of size <size> and returns it. The semantics are similar
* to those of malloc().
*/
static forceinline void *pool_alloc_area(size_t size, size_t align)
static forceinline void *pool_alloc_area(size_t size)
{
return ha_aligned_alloc(align, size);
return malloc(size);
}
/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
@ -44,7 +43,8 @@ static forceinline void *pool_alloc_area(size_t size, size_t align)
*/
static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
{
ha_aligned_free_size(area, size);
will_free(area, size);
free(area);
}
/************* use-after-free allocator *************/
@ -52,15 +52,14 @@ static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
/* allocates an area of size <size> and returns it. The semantics are similar
* to those of malloc(). However the allocation is rounded up to 4kB so that a
* full page is allocated. This ensures the object can be freed alone so that
* future dereferences are easily detected. The returned object is always at
* least 16-bytes aligned to avoid issues with unaligned structure objects, and
* in any case, is aligned at least as required by the pool, though no
* more than 4096. In case some padding is added, the area's start address is
* copied at the end of the padding to help detect underflows.
* future dereferences are easily detected. The returned object is always
* 16-bytes aligned to avoid issues with unaligned structure objects. In case
* some padding is added, the area's start address is copied at the end of the
* padding to help detect underflows.
*/
static inline void *pool_alloc_area_uaf(size_t size, size_t align)
static inline void *pool_alloc_area_uaf(size_t size)
{
size_t pad = (4096 - size) & 0xFF0 & -align;
size_t pad = (4096 - size) & 0xFF0;
void *ret;
ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
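
A quick standalone sketch of the padding arithmetic above, using arbitrary size/align values, to show how the extra "& -align" term only clears additional low bits of the 16-byte-granular padding:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t size  = 100;
        size_t align = 64;

        size_t pad16     = (4096 - size) & 0xFF0;           /* 3984 (0xF90): 16-byte granularity */
        size_t pad_align = (4096 - size) & 0xFF0 & -align;  /* 3968 (0xF80): multiple of 64 */

        printf("pad16=%zu pad_align=%zu\n", pad16, pad_align);
        return 0;
}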

View File

@ -27,8 +27,6 @@
#define MEM_F_SHARED 0x1
#define MEM_F_EXACT 0x2
#define MEM_F_UAF 0x4
#define MEM_F_STATREG 0x8 /* static registration: do not free it! */
/* A special pointer for the pool's free_list that indicates someone is
* currently manipulating it. Serves as a short-lived lock.
@ -53,7 +51,6 @@
#define POOL_DBG_TAG 0x00000080 // place a tag at the end of the area
#define POOL_DBG_POISON 0x00000100 // poison memory area on pool_alloc()
#define POOL_DBG_UAF 0x00000200 // enable use-after-free protection
#define POOL_DBG_BACKUP 0x00000400 // backup the object contents on free()
/* This is the head of a thread-local cache */
@ -70,9 +67,7 @@ struct pool_cache_head {
*/
struct pool_registration {
struct list list; /* link element */
const char *name; /* name of the pool */
const char *file; /* where the pool is declared */
unsigned int line; /* line in the file where the pool is declared, 0 if none */
char name[12]; /* name of the pool */
unsigned int size; /* expected object size */
unsigned int flags; /* MEM_F_* */
unsigned int align; /* expected alignment; 0=unspecified */
@ -128,7 +123,6 @@ struct pool_head {
unsigned int minavail; /* how many chunks are expected to be used */
unsigned int size; /* chunk size */
unsigned int flags; /* MEM_F_* */
unsigned int align; /* alignment size */
unsigned int users; /* number of pools sharing this zone */
unsigned int alloc_sz; /* allocated size (includes hidden fields) */
unsigned int sum_size; /* sum of all registered users' size */

View File

@ -30,71 +30,19 @@
#include <haproxy/pool-t.h>
#include <haproxy/thread.h>
/* This creates a pool_reg and registers a call to create_pool_callback(ptr) with it.
* Do not use this one, use REGISTER_POOL() instead.
*/
#define __REGISTER_POOL(_line, _ptr, _name, _size, _align) \
static struct pool_registration __pool_reg_##_line = { \
.name = _name, \
.file = __FILE__, \
.line = __LINE__, \
.size = _size, \
.flags = MEM_F_STATREG, \
.align = _align, \
}; \
INITCALL3(STG_POOL, create_pool_callback, (_ptr), (_name), &__pool_reg_##_line);
/* intermediary level for line number resolution, do not use this one, use
* REGISTER_POOL() instead.
*/
#define _REGISTER_POOL(line, ptr, name, size, align) \
__REGISTER_POOL(line, ptr, name, size, align)
/* This registers a call to create_pool_callback(ptr) with these args */
/* This registers a call to create_pool_callback(ptr, name, size) */
#define REGISTER_POOL(ptr, name, size) \
_REGISTER_POOL(__LINE__, ptr, name, size, 0)
INITCALL3(STG_POOL, create_pool_callback, (ptr), (name), (size))
/* This macro declares a pool head <ptr> and registers its creation */
#define DECLARE_POOL(ptr, name, size) \
struct pool_head *(ptr) __read_mostly = NULL; \
_REGISTER_POOL(__LINE__, &ptr, name, size, 0)
REGISTER_POOL(&ptr, name, size)
/* This macro declares a static pool head <ptr> and registers its creation */
#define DECLARE_STATIC_POOL(ptr, name, size) \
static struct pool_head *(ptr) __read_mostly; \
_REGISTER_POOL(__LINE__, &ptr, name, size, 0)
/*** below are the aligned pool macros, taking one extra arg for alignment ***/
/* This registers a call to create_pool_callback(ptr) with these args */
#define REGISTER_ALIGNED_POOL(ptr, name, size, align) \
_REGISTER_POOL(__LINE__, ptr, name, size, align)
/* This macro declares an aligned pool head <ptr> and registers its creation */
#define DECLARE_ALIGNED_POOL(ptr, name, size, align) \
struct pool_head *(ptr) __read_mostly = NULL; \
_REGISTER_POOL(__LINE__, &ptr, name, size, align)
/* This macro declares a static aligned pool head <ptr> and registers its creation */
#define DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align) \
static struct pool_head *(ptr) __read_mostly; \
_REGISTER_POOL(__LINE__, &ptr, name, size, align)
/*** below are the typed pool macros, taking a type and an extra size ***/
/* This registers a call to create_pool_callback(ptr) with these args */
#define REGISTER_TYPED_POOL(ptr, name, type, extra) \
_REGISTER_POOL(__LINE__, ptr, name, sizeof(type) + extra, __alignof__(type))
/* This macro declares an aligned pool head <ptr> and registers its creation */
#define DECLARE_TYPED_POOL(ptr, name, type, extra) \
struct pool_head *(ptr) __read_mostly = NULL; \
_REGISTER_POOL(__LINE__, &ptr, name, sizeof(type) + extra, __alignof__(type))
/* This macro declares a static aligned pool head <ptr> and registers its creation */
#define DECLARE_STATIC_TYPED_POOL(ptr, name, type, extra) \
static struct pool_head *(ptr) __read_mostly; \
_REGISTER_POOL(__LINE__, &ptr, name, sizeof(type) + extra, __alignof__(type))
REGISTER_POOL(&ptr, name, size)
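
A minimal usage sketch of the declaration macros above; the structure, pool name and call sites are hypothetical:

#include <haproxy/pool.h>

struct foo {
        int   a;
        void *b;
};

DECLARE_STATIC_POOL(pool_head_foo, "foo", sizeof(struct foo));
/* or, with an explicit alignment:
 *   DECLARE_STATIC_ALIGNED_POOL(pool_head_foo, "foo", sizeof(struct foo), 64);
 * or, deriving size and alignment from the type itself:
 *   DECLARE_STATIC_TYPED_POOL(pool_head_foo, "foo", struct foo, 0);
 */

static struct foo *foo_new(void)
{
        return pool_alloc(pool_head_foo);
}

static void foo_release(struct foo *f)
{
        pool_free(pool_head_foo, f);
}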
/* By default, free objects are linked by a pointer stored at the beginning of
* the memory area. When DEBUG_MEMORY_POOLS is set, the allocated area is
@ -175,22 +123,14 @@ unsigned long long pool_total_allocated(void);
unsigned long long pool_total_used(void);
void pool_flush(struct pool_head *pool);
void pool_gc(struct pool_head *pool_ctx);
struct pool_head *create_pool_with_loc(const char *name, unsigned int size, unsigned int align,
unsigned int flags, const char *file, unsigned int line);
struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg);
void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg);
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size);
void *pool_destroy(struct pool_head *pool);
void pool_destroy_all(void);
void *__pool_alloc(struct pool_head *pool, unsigned int flags);
void __pool_free(struct pool_head *pool, void *ptr);
void pool_inspect_item(const char *msg, struct pool_head *pool, const void *item, const void *caller, ssize_t ofs);
#define create_pool(name, size, flags) \
create_pool_with_loc(name, size, 0, flags, __FILE__, __LINE__)
#define create_aligned_pool(name, size, align, flags) \
create_pool_with_loc(name, size, align, flags, __FILE__, __LINE__)
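
A minimal sketch of the runtime counterpart, assuming MEM_F_SHARED as flag and an arbitrary pool name/size; the create_pool() wrapper above records the call site via __FILE__/__LINE__:

#include <haproxy/pool.h>

static struct pool_head *pool_head_demo;

static int demo_init(void)
{
        /* expands to create_pool_with_loc("demo", 64, 0, MEM_F_SHARED, __FILE__, __LINE__) */
        pool_head_demo = create_pool("demo", 64, MEM_F_SHARED);
        return pool_head_demo != NULL;
}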
/****************** Thread-local cache management ******************/

View File

@ -311,10 +311,6 @@ struct proxy {
char flags; /* bit field PR_FL_* */
enum pr_mode mode; /* mode = PR_MODE_TCP, PR_MODE_HTTP, ... */
char cap; /* supported capabilities (PR_CAP_*) */
unsigned long last_change; /* internal use only: last time the proxy state was changed */
struct list global_list; /* list member for global proxy list */
unsigned int maxconn; /* max # of active streams on the frontend */
int options; /* PR_O_REDISP, PR_O_TRANSP, ... */
@ -352,7 +348,7 @@ struct proxy {
#ifdef USE_QUIC
struct list quic_init_rules; /* quic-initial rules */
#endif
struct server *srv, *defsrv; /* known servers; default server configuration */
struct server *srv, defsrv; /* known servers; default server configuration */
struct lbprm lbprm; /* load-balancing parameters */
int srv_act, srv_bck; /* # of servers eligible for LB (UP|!checked) AND (enabled+weight!=0) */
int served; /* # of active sessions currently being served */

View File

@ -33,7 +33,6 @@
#include <haproxy/thread.h>
extern struct proxy *proxies_list;
extern struct list proxies;
extern struct eb_root used_proxy_id; /* list of proxy IDs in use */
extern unsigned int error_snapshot_id; /* global ID assigned to each error then incremented */
extern struct eb_root proxy_by_name; /* tree of proxies sorted by name */
@ -61,6 +60,7 @@ void proxy_store_name(struct proxy *px);
struct proxy *proxy_find_by_id(int id, int cap, int table);
struct proxy *proxy_find_by_name(const char *name, int cap, int table);
struct proxy *proxy_find_best_match(int cap, const char *name, int id, int *diff);
struct server *findserver(const struct proxy *px, const char *name);
int proxy_cfg_ensure_no_http(struct proxy *curproxy);
int proxy_cfg_ensure_no_log(struct proxy *curproxy);
void init_new_proxy(struct proxy *p);
@ -135,24 +135,22 @@ static inline void proxy_reset_timeouts(struct proxy *proxy)
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
_HA_ATOMIC_INC(&fe->fe_counters.cum_conn);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->cum_conn);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
update_freq_ctr(&fe->fe_counters.conn_per_sec, 1));
}
/* increase the number of cumulated connections accepted by the designated frontend */
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
_HA_ATOMIC_INC(&fe->fe_counters.cum_sess);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->cum_sess);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
update_freq_ctr(&fe->fe_counters.sess_per_sec, 1));
}
/* increase the number of cumulated HTTP sessions on the designated frontend.
@ -162,21 +160,20 @@ static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct prox
unsigned int http_ver)
{
if (http_ver == 0 ||
http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
http_ver > sizeof(fe->fe_counters.cum_sess_ver) / sizeof(*fe->fe_counters.cum_sess_ver))
return;
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
_HA_ATOMIC_INC(&fe->fe_counters.cum_sess_ver[http_ver - 1]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
_HA_ATOMIC_INC(&l->counters->cum_sess_ver[http_ver - 1]);
}
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
_HA_ATOMIC_INC(&be->be_counters.cum_sess);
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
update_freq_ctr(&be->be_counters.sess_per_sec, 1));
}
/* increase the number of cumulated requests on the designated frontend.
@ -186,15 +183,14 @@ static inline void proxy_inc_be_ctr(struct proxy *be)
static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
unsigned int http_ver)
{
if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
if (http_ver >= sizeof(fe->fe_counters.p.http.cum_req) / sizeof(*fe->fe_counters.p.http.cum_req))
return;
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
_HA_ATOMIC_INC(&fe->fe_counters.p.http.cum_req[http_ver]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->p.http.cum_req[http_ver]);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
update_freq_ctr(&fe->fe_counters.req_per_sec, 1));
}
/* Returns non-zero if the proxy is configured to retry a request if we got that status, 0 otherwise */

View File

@ -1,17 +1,12 @@
#ifndef QPACK_ENC_H_
#define QPACK_ENC_H_
#include <haproxy/http-t.h>
#include <haproxy/istbuf.h>
struct buffer;
int qpack_encode_field_section_line(struct buffer *out);
int qpack_encode_int_status(struct buffer *out, unsigned int status);
int qpack_encode_method(struct buffer *out, enum http_meth_t meth, struct ist other);
int qpack_encode_scheme(struct buffer *out, const struct ist scheme);
int qpack_encode_path(struct buffer *out, const struct ist path);
int qpack_encode_auth(struct buffer *out, const struct ist auth);
int qpack_encode_header(struct buffer *out, const struct ist n, const struct ist v);
#endif /* QPACK_ENC_H_ */

View File

@ -28,22 +28,22 @@
#include <sys/socket.h>
#include <import/ebtree-t.h>
#include <haproxy/cbuf-t.h>
#include <haproxy/list.h>
#include <haproxy/show_flags-t.h>
#include <haproxy/api-t.h>
#include <haproxy/buf-t.h>
#include <haproxy/listener-t.h>
#include <haproxy/openssl-compat.h>
#include <haproxy/mux_quic-t.h>
#include <haproxy/quic_cid-t.h>
#include <haproxy/quic_cc-t.h>
#include <haproxy/quic_frame-t.h>
#include <haproxy/quic_loss-t.h>
#include <haproxy/quic_openssl_compat-t.h>
#include <haproxy/quic_stats-t.h>
#include <haproxy/quic_tls-t.h>
#include <haproxy/quic_tp-t.h>
#include <haproxy/show_flags-t.h>
#include <haproxy/ssl_sock-t.h>
#include <haproxy/task-t.h>
#include <haproxy/task.h>
#include <import/ebtree-t.h>
typedef unsigned long long ull;
@ -228,9 +228,6 @@ struct quic_version {
extern const struct quic_version quic_versions[];
extern const size_t quic_versions_nb;
extern const struct quic_version *preferred_version;
extern const struct quic_version *quic_version_draft_29;
extern const struct quic_version *quic_version_1;
extern const struct quic_version *quic_version_2;
/* unused: 0x01 */
/* Flag the packet number space as requiring an ACK frame to be sent. */
@ -251,7 +248,7 @@ extern const struct quic_version *quic_version_2;
/* The maximum number of bytes of CRYPTO data in flight during handshakes. */
#define QUIC_CRYPTO_IN_FLIGHT_MAX 4096
/* Status of the MUX layer. This defines how to handle app data.
/* Status of the connection/mux layer. This defines how to handle app data.
*
* During a standard quic_conn lifetime it transitions like this :
* QC_MUX_NULL -> QC_MUX_READY -> QC_MUX_RELEASED
@ -282,10 +279,6 @@ struct quic_conn_cntrs {
long long streams_blocked_uni; /* total number of times STREAMS_BLOCKED_UNI frame was received */
};
struct connection;
struct qcc;
struct qcc_app_ops;
#define QUIC_CONN_COMMON \
struct { \
/* Connection owned socket FD. */ \
@ -308,7 +301,6 @@ struct qcc_app_ops;
/* Number of received bytes. */ \
uint64_t rx; \
} bytes; \
size_t max_udp_payload; \
/* First DCID used by client on its Initial packet. */ \
struct quic_cid odcid; \
/* DCID of our endpoint - not updated when a new DCID is used */ \
@ -319,7 +311,7 @@ struct qcc_app_ops;
* with a connection \
*/ \
struct eb_root *cids; \
enum obj_type *target; \
struct listener *li; /* only valid for frontend connections */ \
/* Idle timer task */ \
struct task *idle_timer_task; \
unsigned int idle_expire; \
@ -342,10 +334,7 @@ struct quic_conn {
int tps_tls_ext;
int state;
enum qc_mux_state mux_state; /* status of the connection/mux layer */
#ifdef HAVE_OPENSSL_QUIC
uint32_t prot_level;
#endif
#if defined(USE_QUIC_OPENSSL_COMPAT) || defined(HAVE_OPENSSL_QUIC)
#ifdef USE_QUIC_OPENSSL_COMPAT
unsigned char enc_params[QUIC_TP_MAX_ENCLEN]; /* encoded QUIC transport parameters */
size_t enc_params_len;
#endif
@ -356,10 +345,6 @@ struct quic_conn {
*/
uint64_t hash64;
/* QUIC client only: retry token received from the server's RETRY packet */
unsigned char *retry_token;
size_t retry_token_len;
/* Initial encryption level */
struct quic_enc_level *iel;
/* 0-RTT encryption level */
@ -398,10 +383,10 @@ struct quic_conn {
/* RX buffer */
struct buffer buf;
struct list pkt_list;
/* first unhandled streams ID, set by MUX after release */
uint64_t stream_max_uni;
uint64_t stream_max_bidi;
struct {
/* Number of open or closed streams */
uint64_t nb_streams;
} strms[QCS_MAX_TYPES];
} rx;
struct {
struct quic_tls_kp prv_rx;
@ -448,7 +433,7 @@ struct quic_conn_closed {
#define QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED (1U << 0)
#define QUIC_FL_CONN_SPIN_BIT (1U << 1) /* Spin bit set by remote peer */
#define QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS (1U << 2) /* HANDSHAKE_DONE must be sent */
/* gap here */
#define QUIC_FL_CONN_LISTENER (1U << 3)
#define QUIC_FL_CONN_ACCEPT_REGISTERED (1U << 4)
/* gap here */
#define QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ (1U << 6)
@ -464,7 +449,6 @@ struct quic_conn_closed {
#define QUIC_FL_CONN_HPKTNS_DCD (1U << 16) /* Handshake packet number space discarded */
#define QUIC_FL_CONN_PEER_VALIDATED_ADDR (1U << 17) /* Peer address is considered as validated for this connection. */
#define QUIC_FL_CONN_NO_TOKEN_RCVD (1U << 18) /* Client did not send any token */
#define QUIC_FL_CONN_SCID_RECEIVED (1U << 19) /* (client only) first Initial received */
/* gap here */
#define QUIC_FL_CONN_TO_KILL (1U << 24) /* Unusable connection, to be killed */
#define QUIC_FL_CONN_TX_TP_RECEIVED (1U << 25) /* Peer transport parameters have been received (used for the transmitting part) */
@ -488,6 +472,7 @@ static forceinline char *qc_show_flags(char *buf, size_t len, const char *delim,
_(QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED,
_(QUIC_FL_CONN_SPIN_BIT,
_(QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS,
_(QUIC_FL_CONN_LISTENER,
_(QUIC_FL_CONN_ACCEPT_REGISTERED,
_(QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ,
_(QUIC_FL_CONN_RETRANS_NEEDED,
@ -507,7 +492,7 @@ static forceinline char *qc_show_flags(char *buf, size_t len, const char *delim,
_(QUIC_FL_CONN_EXP_TIMER,
_(QUIC_FL_CONN_CLOSING,
_(QUIC_FL_CONN_DRAINING,
_(QUIC_FL_CONN_IMMEDIATE_CLOSE)))))))))))))))))))))));
_(QUIC_FL_CONN_IMMEDIATE_CLOSE))))))))))))))))))))))));
/* epilogue */
_(~0U);
return buf;

View File

@ -69,8 +69,7 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
struct quic_connection_id *conn_id,
struct sockaddr_storage *local_addr,
struct sockaddr_storage *peer_addr,
int token, void *owner,
struct connection *conn);
int server, int token, void *owner);
int quic_build_post_handshake_frames(struct quic_conn *qc);
const struct quic_version *qc_supported_version(uint32_t version);
int quic_peer_validated_addr(struct quic_conn *qc);
@ -82,6 +81,11 @@ void qc_check_close_on_released_mux(struct quic_conn *qc);
int quic_stateless_reset_token_cpy(unsigned char *pos, size_t len,
const unsigned char *salt, size_t saltlen);
static inline int qc_is_listener(struct quic_conn *qc)
{
return qc->flags & QUIC_FL_CONN_LISTENER;
}
/* Free the CIDs attached to <conn> QUIC connection. */
static inline void free_quic_conn_cids(struct quic_conn *conn)
{
@ -159,22 +163,6 @@ static inline void quic_free_ncbuf(struct ncbuf *ncbuf)
*ncbuf = NCBUF_NULL;
}
/* Return the address of the QUIC counters attached to the proxy of
* the connection owner whose object type address is <o>, for
* listeners and servers, or NULL for other object types.
*/
static inline void *qc_counters(enum obj_type *o, const struct stats_module *m)
{
struct proxy *p;
struct listener *l = objt_listener(o);
struct server *s = objt_server(o);
p = l ? l->bind_conf->frontend :
s ? s->proxy : NULL;
return p ? EXTRA_COUNTERS_GET(p->extra_counters_fe, m) : NULL;
}
void chunk_frm_appendf(struct buffer *buf, const struct quic_frame *frm);
void quic_set_connection_close(struct quic_conn *qc, const struct quic_err err);
void quic_set_tls_alert(struct quic_conn *qc, int alert);

View File

@ -13,8 +13,6 @@
#include <haproxy/quic_rx-t.h>
#include <haproxy/quic_sock-t.h>
extern struct pool_head *pool_head_quic_retry_token;
struct listener;
int quic_generate_retry_token(unsigned char *token, size_t len,
@ -30,9 +28,6 @@ int quic_retry_token_check(struct quic_rx_packet *pkt,
struct listener *l,
struct quic_conn *qc,
struct quic_cid *odcid);
int quic_retry_packet_check(struct quic_conn *qc, struct quic_rx_packet *pkt,
const unsigned char *beg, const unsigned char *end,
const unsigned char *pos, size_t *retry_token_len);
#endif /* USE_QUIC */
#endif /* _HAPROXY_QUIC_RETRY_H */

View File

@ -26,7 +26,7 @@
#include <haproxy/quic_rx-t.h>
int quic_dgram_parse(struct quic_dgram *dgram, struct quic_conn *from_qc,
enum obj_type *obj_type);
struct listener *li);
int qc_treat_rx_pkts(struct quic_conn *qc);
int qc_parse_hd_form(struct quic_rx_packet *pkt,
unsigned char **pos, const unsigned char *end);

View File

@ -31,9 +31,7 @@
#include <haproxy/api.h>
#include <haproxy/connection-t.h>
#include <haproxy/fd-t.h>
#include <haproxy/listener-t.h>
#include <haproxy/obj_type.h>
#include <haproxy/quic_conn-t.h>
#include <haproxy/quic_sock-t.h>
@ -79,8 +77,7 @@ static inline char qc_test_fd(struct quic_conn *qc)
*/
static inline int qc_fd(struct quic_conn *qc)
{
/* TODO: check this: For backends, qc->fd is always initialized */
return qc_test_fd(qc) ? qc->fd : __objt_listener(qc->target)->rx.fd;
return qc_test_fd(qc) ? qc->fd : qc->li->rx.fd;
}
/* Try to increment <l> handshake current counter. If listener limit is

View File

@ -34,8 +34,7 @@
#include <haproxy/ssl_sock-t.h>
int ssl_quic_initial_ctx(struct bind_conf *bind_conf);
SSL_CTX *ssl_quic_srv_new_ssl_ctx(void);
int qc_alloc_ssl_sock_ctx(struct quic_conn *qc, struct connection *conn);
int qc_alloc_ssl_sock_ctx(struct quic_conn *qc);
int qc_ssl_provide_all_quic_data(struct quic_conn *qc, struct ssl_sock_ctx *ctx);
static inline void qc_free_ssl_sock_ctx(struct ssl_sock_ctx **ctx)

View File

@ -7,7 +7,6 @@
#include <haproxy/buf-t.h>
#include <haproxy/list-t.h>
#include <haproxy/quic_utils-t.h>
/* A QUIC STREAM buffer used for Tx.
*
@ -44,8 +43,6 @@ struct qc_stream_desc {
uint64_t ack_offset; /* last acknowledged offset */
struct eb_root buf_tree; /* list of active and released buffers */
struct bdata_ctr data; /* data utilization counter */
ullong origin_ts; /* timestamp for creation date of current stream instance */
int flags; /* QC_SD_FL_* values */

View File

@ -74,8 +74,7 @@ int quic_tls_decrypt(unsigned char *buf, size_t len,
const unsigned char *key, const unsigned char *iv);
int quic_tls_generate_retry_integrity_tag(unsigned char *odcid, unsigned char odcid_len,
const unsigned char *buf, size_t len,
unsigned char *tag,
unsigned char *buf, size_t len,
const struct quic_version *qv);
int quic_tls_derive_keys(const QUIC_AEAD *aead, const EVP_CIPHER *hp,
@ -292,29 +291,6 @@ static inline struct quic_enc_level **ssl_to_qel_addr(struct quic_conn *qc,
}
}
#ifdef HAVE_OPENSSL_QUIC
/* Simple helper function which translates an OpenSSL SSL protection level
* to a quictls SSL encryption level. This way the code which uses the OpenSSL QUIC API
* may reuse the code which uses the quictls API.
*/
static inline enum ssl_encryption_level_t ssl_prot_level_to_enc_level(struct quic_conn *qc,
uint32_t prot_level)
{
switch (prot_level) {
case OSSL_RECORD_PROTECTION_LEVEL_NONE:
return ssl_encryption_initial;
case OSSL_RECORD_PROTECTION_LEVEL_EARLY:
return ssl_encryption_early_data;
case OSSL_RECORD_PROTECTION_LEVEL_HANDSHAKE:
return ssl_encryption_handshake;
case OSSL_RECORD_PROTECTION_LEVEL_APPLICATION:
return ssl_encryption_application;
default:
return -1;
}
}
#endif
/* Return the address of the QUIC TLS encryption level associated to <level> internal
* encryption level and attached to <qc> QUIC connection if succeeded, or
* NULL if failed.
@ -537,8 +513,7 @@ static inline int quic_pktns_init(struct quic_conn *qc, struct quic_pktns **p)
return 1;
}
static inline void quic_pktns_tx_pkts_release(struct quic_pktns *pktns,
struct quic_conn *qc, int resend)
static inline void quic_pktns_tx_pkts_release(struct quic_pktns *pktns, struct quic_conn *qc)
{
struct eb64_node *node;
@ -559,11 +534,7 @@ static inline void quic_pktns_tx_pkts_release(struct quic_pktns *pktns,
qc_frm_unref(frm, qc);
LIST_DEL_INIT(&frm->list);
quic_tx_packet_refdec(frm->pkt);
if (!resend)
qc_frm_free(qc, &frm);
else
LIST_APPEND(&pktns->tx.frms, &frm->list);
qc_frm_free(qc, &frm);
}
eb64_delete(&pkt->pn_node);
quic_tx_packet_refdec(pkt);
@ -578,12 +549,9 @@ static inline void quic_pktns_tx_pkts_release(struct quic_pktns *pktns,
* connection.
* Note that all the non acknowledged TX packets and their frames are freed.
* Always succeeds.
* <resend> boolean must be 1 to resend the frames which are in flight.
* This is only used to resend the Initial packet frames upon a RETRY
* packet receipt (backend only option).
*/
static inline void quic_pktns_discard(struct quic_pktns *pktns,
struct quic_conn *qc, int resend)
struct quic_conn *qc)
{
TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
@ -599,7 +567,7 @@ static inline void quic_pktns_discard(struct quic_pktns *pktns,
pktns->tx.loss_time = TICK_ETERNITY;
pktns->tx.pto_probe = 0;
pktns->tx.in_flight = 0;
quic_pktns_tx_pkts_release(pktns, qc, resend);
quic_pktns_tx_pkts_release(pktns, qc);
TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
}

View File

@ -26,9 +26,6 @@ int qc_lstnr_params_init(struct quic_conn *qc,
const unsigned char *dcid, size_t dcidlen,
const unsigned char *scid, size_t scidlen,
const struct quic_cid *token_odcid);
void qc_srv_params_init(struct quic_conn *qc,
const struct quic_transport_params *srv_params,
const unsigned char *scid, size_t scidlen);
/* Dump <cid> transport parameter connection ID value if present (non null length).
* Used only for debugging purposes.

View File

@ -99,6 +99,5 @@ struct quic_rx_crypto_frm {
#define QUIC_EV_CONN_KP (1ULL << 50)
#define QUIC_EV_CONN_SSL_COMPAT (1ULL << 51)
#define QUIC_EV_CONN_BIND_TID (1ULL << 52)
#define QUIC_EV_CONN_RELEASE_RCD (1ULL << 53)
#endif /* _HAPROXY_QUIC_TRACE_T_H */

View File

@ -3,7 +3,7 @@
#define QUIC_MIN_CC_PKTSIZE 128
#define QUIC_DGRAM_HEADLEN (sizeof(uint16_t) + sizeof(void *))
#define QUIC_MAX_CC_BUFSIZE _MAX(QUIC_INITIAL_IPV6_MTU, QUIC_INITIAL_IPV4_MTU)
#define QUIC_MAX_CC_BUFSIZE (2 * (QUIC_MIN_CC_PKTSIZE + QUIC_DGRAM_HEADLEN))
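
A quick arithmetic sketch comparing the two definitions above, assuming 64-bit pointers; the initial MTU constants (1252 for IPv4, 1232 for IPv6) are assumed values used only for illustration:

#include <stdint.h>
#include <stdio.h>

#define QUIC_MIN_CC_PKTSIZE   128
#define QUIC_DGRAM_HEADLEN    (sizeof(uint16_t) + sizeof(void *))
#define QUIC_INITIAL_IPV4_MTU 1252  /* assumed value */
#define QUIC_INITIAL_IPV6_MTU 1232  /* assumed value */
#define _MAX(a, b)            ((a) > (b) ? (a) : (b))

int main(void)
{
        /* old sizing: 2 * (128 + 2 + 8) = 276 bytes on a 64-bit platform */
        printf("old: %zu bytes\n", (size_t)(2 * (QUIC_MIN_CC_PKTSIZE + QUIC_DGRAM_HEADLEN)));
        /* new sizing: follows the largest initial MTU, i.e. 1252 bytes here */
        printf("new: %d bytes\n", _MAX(QUIC_INITIAL_IPV6_MTU, QUIC_INITIAL_IPV4_MTU));
        return 0;
}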
/* Sendmsg input buffer cannot be bigger than 65535 bytes. This comes from UDP
* header which uses a 2-bytes length field. QUIC datagrams are limited to 1252

View File

@ -23,7 +23,6 @@
#include <haproxy/buf-t.h>
#include <haproxy/list-t.h>
#include <haproxy/pool.h>
#include <haproxy/quic_conn-t.h>
#include <haproxy/quic_tls-t.h>
#include <haproxy/quic_pacing-t.h>

View File

@ -1,17 +0,0 @@
#ifndef _HAPROXY_QUIC_UTILS_T_H
#define _HAPROXY_QUIC_UTILS_T_H
#ifdef USE_QUIC
#include <haproxy/api-t.h>
/* Counter which can be used to measure data amount across several buffers. */
struct bdata_ctr {
uint64_t tot; /* sum of data present in all underlying buffers */
uint8_t bcnt; /* current number of allocated underlying buffers */
uint8_t bmax; /* max number of allocated buffers during stream lifetime */
};
#endif /* USE_QUIC */
#endif /* _HAPROXY_QUIC_UTILS_T_H */

View File

@ -1,59 +0,0 @@
#ifndef _HAPROXY_QUIC_UTILS_H
#define _HAPROXY_QUIC_UTILS_H
#ifdef USE_QUIC
#include <haproxy/quic_utils-t.h>
#include <haproxy/buf-t.h>
#include <haproxy/chunk.h>
static inline int quic_stream_is_uni(uint64_t id)
{
return id & QCS_ID_DIR_BIT;
}
static inline int quic_stream_is_bidi(uint64_t id)
{
return !quic_stream_is_uni(id);
}
static inline void bdata_ctr_init(struct bdata_ctr *ctr)
{
ctr->tot = 0;
ctr->bcnt = 0;
ctr->bmax = 0;
}
static inline void bdata_ctr_binc(struct bdata_ctr *ctr)
{
++ctr->bcnt;
ctr->bmax = MAX(ctr->bcnt, ctr->bmax);
}
static inline void bdata_ctr_bdec(struct bdata_ctr *ctr)
{
--ctr->bcnt;
}
static inline void bdata_ctr_add(struct bdata_ctr *ctr, size_t data)
{
ctr->tot += data;
}
static inline void bdata_ctr_del(struct bdata_ctr *ctr, size_t data)
{
ctr->tot -= data;
}
static inline void bdata_ctr_print(struct buffer *chunk,
const struct bdata_ctr *ctr,
const char *prefix)
{
chunk_appendf(chunk, " %s%d(%d)/%llu",
prefix, ctr->bcnt, ctr->bmax, (ullong)ctr->tot);
}
#endif /* USE_QUIC */
#endif /* _HAPROXY_QUIC_UTILS_H */
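
A minimal sketch of how the bdata_ctr helpers shown above are meant to be combined over a buffer's lifetime; the function name and the 512-byte figure are arbitrary:

#include <haproxy/chunk.h>
#include <haproxy/quic_utils.h>

static void bdata_ctr_demo(struct buffer *chunk)
{
        struct bdata_ctr ctr;

        bdata_ctr_init(&ctr);       /* tot=0, bcnt=0, bmax=0 */

        bdata_ctr_binc(&ctr);       /* one underlying buffer allocated */
        bdata_ctr_add(&ctr, 512);   /* 512 bytes stored in it */

        bdata_ctr_del(&ctr, 512);   /* data consumed */
        bdata_ctr_bdec(&ctr);       /* buffer released; bmax still records the peak */

        bdata_ctr_print(chunk, &ctr, "buf=");  /* e.g. " buf=0(1)/0" appended to <chunk> */
}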

View File

@ -348,6 +348,15 @@ static inline void sc_sync_send(struct stconn *sc)
{
if (sc_ep_test(sc, SE_FL_T_MUX))
sc_conn_sync_send(sc);
else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
sc_applet_sync_send(sc);
if (sc_oc(sc)->flags & CF_WRITE_EVENT) {
/* Data was sent, wake the applet up. It is safe to do so because sc_applet_sync_send()
* removes the CF_WRITE_EVENT flag from the channel before trying to send data to the applet.
*/
task_wakeup(__sc_appctx(sc)->t, TASK_WOKEN_OTHER);
}
}
}
/* Combines both sc_update_rx() and sc_update_tx() at once */
@ -386,19 +395,7 @@ static inline int sc_is_send_allowed(const struct stconn *sc)
if (sc->flags & SC_FL_SHUT_DONE)
return 0;
if (!sc_appctx(sc) || !(__sc_appctx(sc)->flags & APPCTX_FL_INOUT_BUFS))
return !sc_ep_test(sc, SE_FL_WAIT_DATA | SE_FL_WONT_CONSUME);
if (sc_ep_test(sc, SE_FL_WONT_CONSUME))
return 0;
if (sc_ep_test(sc, SE_FL_WAIT_DATA)) {
if (__sc_appctx(sc)->flags & (APPCTX_FL_INBLK_FULL|APPCTX_FL_INBLK_ALLOC))
return 0;
if (!co_data(sc_oc(sc)))
return 0;
}
return 1;
return !sc_ep_test(sc, SE_FL_WAIT_DATA | SE_FL_WONT_CONSUME);
}
static inline int sc_rcv_may_expire(const struct stconn *sc)

View File

@ -171,7 +171,6 @@ enum srv_init_state {
#define SRV_F_DEFSRV_USE_SSL 0x4000 /* default-server uses SSL */
#define SRV_F_DELETED 0x8000 /* srv is deleted but not yet purged */
#define SRV_F_STRICT_MAXCONN 0x10000 /* maxconn is to be strictly enforced, as a limit of outbound connections */
#define SRV_F_CHECKED 0x20000 /* set once server was postparsed */
/* configured server options for send-proxy (server->pp_opts) */
#define SRV_PP_V1 0x0001 /* proxy protocol version 1 */
@ -304,13 +303,6 @@ struct srv_pp_tlv_list {
unsigned char type;
};
/* Renegotiate mode */
enum renegotiate_mode {
SSL_RENEGOTIATE_DFLT = 0, /* Use the SSL library's default behavior */
SSL_RENEGOTIATE_OFF, /* Disable secure renegotiation */
SSL_RENEGOTIATE_ON /* Enable secure renegotiation */
};
struct proxy;
struct server {
/* mostly config or admin stuff, doesn't change often */
@ -355,7 +347,6 @@ struct server {
short onmarkedup; /* what to do when marked up: one of HANA_ONMARKEDUP_* */
int slowstart; /* slowstart time in seconds (ms in the conf) */
int idle_ping; /* MUX idle-ping interval in ms */
unsigned long last_change; /* internal use only (not for stats purpose): last time the server state was changed, doesn't change often, not updated atomically on purpose */
char *id; /* just for identification */
uint32_t rid; /* revision: if id has been reused for a new server, rid won't match */
@ -431,7 +422,6 @@ struct server {
int puid; /* proxy-unique server ID, used for SNMP, and "first" LB algo */
int tcp_ut; /* for TCP, user timeout */
char *tcp_md5sig; /* TCP MD5 signature password (RFC2385) */
int do_check; /* temporary variable used during parsing to denote if health checks must be enabled */
int do_agent; /* temporary variable used during parsing to denote if an auxiliary agent check must be enabled */
@ -444,7 +434,7 @@ struct server {
char *lastaddr; /* the address string provided by the server-state file */
struct resolv_options resolv_opts;
int hostname_dn_len; /* string length of the server hostname in Domain Name format */
char *hostname_dn; /* server hostname in Domain Name format (name is lower cased) */
char *hostname_dn; /* server hostname in Domain Name format */
char *hostname; /* server hostname */
struct sockaddr_storage init_addr; /* plain IP address specified on the init-addr line */
unsigned int init_addr_methods; /* initial address setting, 3-bit per method, ends at 0, enough to store 10 entries */
@ -486,11 +476,7 @@ struct server {
int npn_len; /* NPN protocol string length */
char *alpn_str; /* ALPN protocol string */
int alpn_len; /* ALPN protocol string length */
int renegotiate; /* Renegotiate mode (SSL_RENEGOTIATE_ flag) */
} ssl_ctx;
#ifdef USE_QUIC
struct quic_transport_params quic_params; /* QUIC transport parameters */
#endif
struct resolv_srvrq *srvrq; /* Pointer representing the DNS SRV request, if any */
struct list srv_rec_item; /* to attach server to a srv record item */
struct list ip_rec_item; /* to attach server to a A or AAAA record item */

View File

@ -59,11 +59,10 @@ const char *srv_update_addr_port(struct server *s, const char *addr, const char
const char *server_inetaddr_updater_by_to_str(enum server_inetaddr_updater_by by);
const char *srv_update_check_addr_port(struct server *s, const char *addr, const char *port);
const char *srv_update_agent_addr_port(struct server *s, const char *addr, const char *port);
struct server *server_find_by_id(struct proxy *bk, int id);
struct server *server_find_by_id_unique(struct proxy *bk, int id, uint32_t rid);
struct server *server_find_by_name(struct proxy *px, const char *name);
struct server *server_find_by_addr(struct proxy *px, const char *addr);
struct server *server_find(struct proxy *bk, const char *name);
struct server *server_find_unique(struct proxy *bk, const char *name, uint32_t rid);
struct server *server_find_by_name(struct proxy *bk, const char *name);
struct server *server_find_by_name_unique(struct proxy *bk, const char *name, uint32_t rid);
struct server *server_find_best_match(struct proxy *bk, char *name, int id, int *diff);
void apply_server_state(void);
void srv_compute_all_admin_states(struct proxy *px);
@ -74,7 +73,7 @@ struct server *new_server(struct proxy *proxy);
void srv_take(struct server *srv);
struct server *srv_drop(struct server *srv);
void srv_free_params(struct server *srv);
int srv_init(struct server *srv);
int srv_init_per_thr(struct server *srv);
void srv_set_ssl(struct server *s, int use_ssl);
const char *srv_adm_st_chg_cause(enum srv_adm_st_chg_cause cause);
const char *srv_op_st_chg_cause(enum srv_op_st_chg_cause cause);
@ -182,16 +181,15 @@ const struct mux_ops *srv_get_ws_proto(struct server *srv);
/* increase the number of cumulated streams on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
{
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);
_HA_ATOMIC_INC(&s->counters.cum_sess);
HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
update_freq_ctr(&s->counters._sess_per_sec, 1));
update_freq_ctr(&s->counters.sess_per_sec, 1));
}
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
s->counters.last_sess = ns_to_sec(now_ns);
}
/* returns the current server throttle rate between 0 and 100% */
@ -321,51 +319,6 @@ static inline int srv_is_transparent(const struct server *srv)
(srv->flags & SRV_F_MAPPORTS);
}
/* Detach server from proxy list. It is supported to call this
* even if the server is not yet in the list.
* Must be called under thread isolation or when it is safe to assume
* that the parent proxy is not skimming through the server list.
*/
static inline void srv_detach(struct server *srv)
{
struct proxy *px = srv->proxy;
if (px->srv == srv)
px->srv = srv->next;
else {
struct server *prev;
for (prev = px->srv; prev && prev->next != srv; prev = prev->next)
;
BUG_ON(!prev);
prev->next = srv->next;
}
}
/* Returns a pointer to the first server matching id <id> in backend <bk>.
* NULL is returned if no match is found.
*/
static inline struct server *server_find_by_id(struct proxy *bk, int id)
{
struct eb32_node *eb32;
eb32 = eb32_lookup(&bk->conf.used_server_id, id);
return eb32 ? container_of(eb32, struct server, conf.id) : NULL;
}
static inline int srv_is_quic(const struct server *srv)
{
#ifdef USE_QUIC
return srv->addr_type.proto_type == PROTO_TYPE_DGRAM &&
srv->addr_type.xprt_type == PROTO_TYPE_STREAM;
#else
return 0;
#endif
}
#endif /* _HAPROXY_SERVER_H */
/*

View File

@ -61,8 +61,6 @@ struct session {
struct list priv_conns; /* list of private conns */
struct sockaddr_storage *src; /* source address (pool), when known, otherwise NULL */
struct sockaddr_storage *dst; /* destination address (pool), when known, otherwise NULL */
struct fe_counters_shared_tg *fe_tgcounters; /* pointer to current thread group shared frontend counters */
struct fe_counters_shared_tg *li_tgcounters; /* pointer to current thread group shared listener counters */
};
/*

View File

@ -171,31 +171,25 @@ static inline void session_unown_conn(struct session *sess, struct connection *c
}
}
/* Add the connection <conn> to the private conns list of session <sess>. Each
* connection is indexed by its respective target in the session. Nothing is
* performed if the connection is already in the session list.
*
* Returns true if conn is inserted or already present, else false if a failure
* occurs during insertion.
/* Add the connection <conn> to the private conns list of session <sess>. This
* function is called only if the connection is private. Nothing is performed
* if the connection is already in the session list or if the session does not
* own the connection.
*/
static inline int session_add_conn(struct session *sess, struct connection *conn)
static inline int session_add_conn(struct session *sess, struct connection *conn, void *target)
{
struct sess_priv_conns *pconns = NULL;
struct server *srv = objt_server(conn->target);
int found = 0;
/* Connection target is used to index it in the session. Only BE conns are expected in session list. */
BUG_ON(!conn->target || objt_listener(conn->target));
BUG_ON(objt_listener(conn->target));
/* A connection cannot be attached already to another session. */
BUG_ON(conn->owner && conn->owner != sess);
/* Already attached to the session */
if (!LIST_ISEMPTY(&conn->sess_el))
/* Already attached to the session or not the connection owner */
if (!LIST_ISEMPTY(&conn->sess_el) || (conn->owner && conn->owner != sess))
return 1;
list_for_each_entry(pconns, &sess->priv_conns, sess_el) {
if (pconns->target == conn->target) {
if (pconns->target == target) {
found = 1;
break;
}
@ -205,7 +199,7 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
pconns = pool_alloc(pool_head_sess_priv_conns);
if (!pconns)
return 0;
pconns->target = conn->target;
pconns->target = target;
LIST_INIT(&pconns->conn_list);
LIST_APPEND(&sess->priv_conns, &pconns->sess_el);
@ -225,34 +219,25 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
return 1;
}
/* Check that session <sess> is able to keep idle connection <conn>. This must
* be called each time a connection stored in a session becomes idle.
*
* Returns 0 if the connection is kept, else non-zero if the connection was
* explicitly removed from the session.
/* Returns 0 if the session can keep the idle conn, -1 if it was destroyed. The
* connection must be private.
*/
static inline int session_check_idle_conn(struct session *sess, struct connection *conn)
{
/* Connection must be attached to session prior to this function call. */
BUG_ON(!conn->owner || conn->owner != sess);
/* Connection is not attached to a session. */
if (!conn->owner)
/* Another session owns this connection */
if (conn->owner != sess)
return 0;
/* Ensure conn is not already accounted as idle, to prevent excess increments of the session idle count. */
BUG_ON(conn->flags & CO_FL_SESS_IDLE);
if (sess->idle_conns >= sess->fe->max_out_conns) {
session_unown_conn(sess, conn);
conn->owner = NULL;
conn->flags &= ~CO_FL_SESS_IDLE;
conn->mux->destroy(conn->ctx);
return -1;
}
else {
} else {
conn->flags |= CO_FL_SESS_IDLE;
sess->idle_conns++;
}
return 0;
}

View File

@ -28,11 +28,9 @@
#include <haproxy/api.h>
#include <haproxy/connection-t.h>
#include <haproxy/listener-t.h>
#include <haproxy/protocol-t.h>
#include <haproxy/sock-t.h>
int sock_create_server_socket(struct connection *conn, struct proxy *be,
enum proto_type proto_type, int sock_type, int *stream_err);
int sock_create_server_socket(struct connection *conn, struct proxy *be, int *stream_err);
void sock_enable(struct receiver *rx);
void sock_disable(struct receiver *rx);
void sock_unbind(struct receiver *rx);

View File

@ -31,7 +31,6 @@ extern int sock_inet6_v6only_default;
extern int sock_inet_tcp_maxseg_default;
extern int sock_inet6_tcp_maxseg_default;
extern int sock_inet6_seems_reachable;
extern uint last_inet6_check;
#ifdef HA_HAVE_MPTCP
extern int sock_inet_mptcp_maxseg_default;
@ -55,6 +54,5 @@ int sock_inet_is_foreign(int fd, sa_family_t family);
int sock_inet4_make_foreign(int fd);
int sock_inet6_make_foreign(int fd);
int sock_inet_bind_receiver(struct receiver *rx, char **errmsg);
int is_inet6_reachable(void);
#endif /* _HAPROXY_SOCK_INET_H */

View File

@ -73,8 +73,6 @@ struct ckch_conf {
} acme;
};
struct jwt_cert_tree_entry;
/*
* this is used to store 1 to SSL_SOCK_NUM_KEYTYPES cert_key_and_chain and
* metadata.
@ -90,7 +88,6 @@ struct ckch_store {
struct list crtlist_entry; /* list of entries which use this store */
struct ckch_conf conf;
struct task *acme_task;
struct jwt_cert_tree_entry *jwt_entry;
struct ebmb_node node;
char path[VAR_ARRAY];
};

View File

@ -62,7 +62,7 @@ struct ckch_inst *ckch_inst_new();
int ckch_inst_new_load_store(const char *path, struct ckch_store *ckchs, struct bind_conf *bind_conf,
struct ssl_bind_conf *ssl_conf, char **sni_filter, int fcount, int is_default, struct ckch_inst **ckchi, char **err);
int ckch_inst_new_load_srv_store(const char *path, struct ckch_store *ckchs,
struct ckch_inst **ckchi, char **err, int is_quic);
struct ckch_inst **ckchi, char **err);
int ckch_inst_rebuild(struct ckch_store *ckch_store, struct ckch_inst *ckchi,
struct ckch_inst **new_inst, char **err);

View File

@ -316,12 +316,6 @@ struct global_ssl {
int disable;
} ocsp_update;
#endif
#ifdef HAVE_ACME
int acme_scheduler;
#endif
int renegotiate; /* Renegotiate mode (SSL_RENEGOTIATE_ flag) */
};
/* The order here matters for picking a default context,

View File

@ -1,7 +1,18 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* include/haproxy/ssl_trace-t.h
* Definitions for SSL traces internal types, constants and flags.
*
* Copyright (C) 2025
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#ifndef _HAPROXY_SSL_TRACE_H
#define _HAPROXY_SSL_TRACE_H
#ifndef _HAPROXY_SSL_TRACE_T_H
#define _HAPROXY_SSL_TRACE_T_H
#include <haproxy/trace-t.h>
@ -22,16 +33,7 @@ extern struct trace_source trace_ssl;
#define SSL_EV_CONN_SWITCHCTX_CB (1ULL << 12)
#define SSL_EV_CONN_CHOOSE_SNI_CTX (1ULL << 13)
#define SSL_EV_CONN_SIGALG_EXT (1ULL << 14)
#define SSL_EV_CONN_CIPHERS_EXT (1ULL << 15)
#define SSL_EV_CONN_CURVES_EXT (1ULL << 16)
#define SSL_VERB_CLEAN 1
#define SSL_VERB_MINIMAL 2
#define SSL_VERB_SIMPLE 3
#define SSL_VERB_ADVANCED 4
#define SSL_VERB_COMPLETE 5
#define TRACE_SOURCE &trace_ssl
#endif /* _HAPROXY_SSL_TRACE_H */
#endif /* _HAPROXY_SSL_TRACE_T_H */

View File

@ -55,7 +55,6 @@ time_t x509_get_notbefore_time_t(X509 *cert);
int curves2nid(const char *curve);
const char *nid2nist(int nid);
const char *sigalg2str(int sigalg);
const char *curveid2str(int curve_id);
#endif /* _HAPROXY_SSL_UTILS_H */
#endif /* USE_OPENSSL */

Some files were not shown because too many files have changed in this diff Show More