Compare commits

..

No commits in common. "master" and "v3.3-dev6" have entirely different histories.

563 changed files with 13288 additions and 32744 deletions

View File

@ -1,7 +1,7 @@
FreeBSD_task:
freebsd_instance:
matrix:
image_family: freebsd-14-3
image_family: freebsd-14-2
only_if: $CIRRUS_BRANCH =~ 'master|next'
install_script:
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2

View File

@ -1,34 +0,0 @@
name: 'setup VTest'
description: 'ssss'
runs:
using: "composite"
steps:
- name: Setup coredumps
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
shell: bash
run: |
sudo sysctl -w fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
- name: Setup ulimit for core dumps
shell: bash
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
ulimit -c unlimited
- name: Install VTest
shell: bash
run: |
scripts/build-vtest.sh
- name: Install problem matcher for VTest
shell: bash
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"

View File

@ -19,7 +19,7 @@ defaults
frontend h2
mode http
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
default_backend h2b
backend h2b

69
.github/matrix.py vendored
View File

@ -125,11 +125,9 @@ def main(ref_name):
# Ubuntu
if "haproxy-" in ref_name:
os = "ubuntu-24.04" # stable branch
os_arm = "ubuntu-24.04-arm" # stable branch
os = "ubuntu-24.04" # stable branch
else:
os = "ubuntu-24.04" # development branch
os_arm = "ubuntu-24.04-arm" # development branch
os = "ubuntu-24.04" # development branch
TARGET = "linux-glibc"
for CC in ["gcc", "clang"]:
@ -174,37 +172,36 @@ def main(ref_name):
# ASAN
for os_asan in [os, os_arm]:
matrix.append(
{
"name": "{}, {}, ASAN, all features".format(os_asan, CC),
"os": os_asan,
"TARGET": TARGET,
"CC": CC,
"FLAGS": [
"USE_OBSOLETE_LINKER=1",
'ARCH_FLAGS="-g -fsanitize=address"',
'OPT_CFLAGS="-O1"',
"USE_ZLIB=1",
"USE_OT=1",
"OT_INC=${HOME}/opt-ot/include",
"OT_LIB=${HOME}/opt-ot/lib",
"OT_RUNPATH=1",
"USE_PCRE2=1",
"USE_PCRE2_JIT=1",
"USE_LUA=1",
"USE_OPENSSL=1",
"USE_WURFL=1",
"WURFL_INC=addons/wurfl/dummy",
"WURFL_LIB=addons/wurfl/dummy",
"USE_DEVICEATLAS=1",
"DEVICEATLAS_SRC=addons/deviceatlas/dummy",
"USE_PROMEX=1",
"USE_51DEGREES=1",
"51DEGREES_SRC=addons/51degrees/dummy/pattern",
],
}
)
matrix.append(
{
"name": "{}, {}, ASAN, all features".format(os, CC),
"os": os,
"TARGET": TARGET,
"CC": CC,
"FLAGS": [
"USE_OBSOLETE_LINKER=1",
'ARCH_FLAGS="-g -fsanitize=address"',
'OPT_CFLAGS="-O1"',
"USE_ZLIB=1",
"USE_OT=1",
"OT_INC=${HOME}/opt-ot/include",
"OT_LIB=${HOME}/opt-ot/lib",
"OT_RUNPATH=1",
"USE_PCRE2=1",
"USE_PCRE2_JIT=1",
"USE_LUA=1",
"USE_OPENSSL=1",
"USE_WURFL=1",
"WURFL_INC=addons/wurfl/dummy",
"WURFL_LIB=addons/wurfl/dummy",
"USE_DEVICEATLAS=1",
"DEVICEATLAS_SRC=addons/deviceatlas/dummy",
"USE_PROMEX=1",
"USE_51DEGREES=1",
"51DEGREES_SRC=addons/51degrees/dummy/pattern",
],
}
)
for compression in ["USE_ZLIB=1"]:
matrix.append(
@ -280,7 +277,7 @@ def main(ref_name):
if "haproxy-" in ref_name:
os = "macos-13" # stable branch
else:
os = "macos-26" # development branch
os = "macos-15" # development branch
TARGET = "osx"
for CC in ["clang"]:

View File

@ -15,7 +15,10 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
@ -49,10 +52,16 @@ jobs:
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests

View File

@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- uses: codespell-project/codespell-problem-matcher@v1.2.0
- uses: codespell-project/actions-codespell@master
with:

View File

@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install h2spec
id: install-h2spec
run: |

View File

@ -10,7 +10,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Compile admin/halog/halog
run: |
make admin/halog/halog

View File

@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none

View File

@ -99,7 +99,7 @@ jobs:
sudo apt-get -yq --force-yes install \
gcc-${{ matrix.platform.arch }} \
${{ matrix.platform.libs }}
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: install quictls

View File

@ -18,19 +18,19 @@ jobs:
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
]
fail-fast: false
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
container:
image: fedora:rawhide
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install dependencies
run: |
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
- uses: ./.github/actions/setup-vtest
- name: Install VTest
run: scripts/build-vtest.sh
- name: Install QuicTLS
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
- name: Build contrib tools
@ -67,4 +67,4 @@ jobs:
- name: Run Unit tests
id: unittests
run: |
make unit-tests
make unit-tests

View File

@ -13,7 +13,7 @@ jobs:
contents: read
steps:
- name: "Checkout repository"
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: "Build on VM"
uses: vmactions/solaris-vm@v1

View File

@ -20,7 +20,7 @@ jobs:
run: |
ulimit -c unlimited
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install dependencies
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
- name: Install VTest

View File

@ -13,7 +13,7 @@ jobs:
contents: read
steps:
- name: "Checkout repository"
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: "Build on VM"
uses: vmactions/netbsd-vm@v1

View File

@ -1,82 +0,0 @@
name: openssl ECH
on:
schedule:
- cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- name: Install OpenSSL+ECH
run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Install curl+ECH
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
ARCH_FLAGS="-ggdb3 -fsanitize=address"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

View File

@ -1,77 +0,0 @@
name: openssl master
on:
schedule:
- cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- uses: ./.github/actions/setup-vtest
- name: Install OpenSSL master
run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

View File

@ -0,0 +1,34 @@
#
# special purpose CI: test against OpenSSL built in "no-deprecated" mode
# let us run those builds weekly
#
# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
#
#
# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
# GH: https://github.com/haproxy/haproxy/issues/367
name: openssl no-deprecated
on:
schedule:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Compile HAProxy
run: |
make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
- name: Run VTest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel

View File

@ -19,7 +19,7 @@ jobs:
packages: write
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3
@ -35,7 +35,7 @@ jobs:
context: https://github.com/haproxytech/haproxy-qns.git
push: true
build-args: |
SSLLIB=AWS-LC
SSLLIB: AWS-LC
tags: ghcr.io/${{ github.repository }}:aws-lc
- name: Cleanup registry
@ -64,7 +64,7 @@ jobs:
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3

View File

@ -19,7 +19,7 @@ jobs:
packages: write
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3
@ -35,7 +35,7 @@ jobs:
context: https://github.com/haproxytech/haproxy-qns.git
push: true
build-args: |
SSLLIB=LibreSSL
SSLLIB: LibreSSL
tags: ghcr.io/${{ github.repository }}:libressl
- name: Cleanup registry
@ -62,7 +62,7 @@ jobs:
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3

View File

@ -17,7 +17,10 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
@ -39,10 +42,16 @@ jobs:
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}

View File

@ -23,7 +23,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Generate Build Matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -44,10 +44,16 @@ jobs:
TMPDIR: /tmp
OT_CPP_VERSION: 1.6.0
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
fetch-depth: 100
- name: Setup coredumps
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
run: |
sudo sysctl -w fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
#
# Github Action cache key cannot contain comma, so we calculate it based on job name
#
@ -70,7 +76,7 @@ jobs:
uses: actions/cache@v4
with:
path: '~/opt-ot/'
key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
- name: Install apt dependencies
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
run: |
@ -87,7 +93,9 @@ jobs:
run: |
brew install socat
brew install lua
- uses: ./.github/actions/setup-vtest
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install SSL ${{ matrix.ssl }}
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
@ -113,16 +121,7 @@ jobs:
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install-bin
- name: Compile admin/halog/halog
run: |
make -j$(nproc) admin/halog/halog \
ERR=1 \
TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
@ -137,9 +136,16 @@ jobs:
echo "::endgroup::"
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}

View File

@ -35,7 +35,7 @@ jobs:
- USE_THREAD=1
- USE_ZLIB=1
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- uses: msys2/setup-msys2@v2
with:
install: >-

View File

@ -13,7 +13,10 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
@ -35,10 +38,16 @@ jobs:
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests

View File

@ -171,17 +171,7 @@ feedback for developers:
as the previous releases that had 6 months to stabilize. In terms of
stability it really means that the point zero version already accumulated
6 months of fixes and that it is much safer to use even just after it is
released. There is one exception though, features marked as "experimental"
are not guaranteed to be maintained beyond the release of the next LTS
branch. The rationale here is that the experimental status is made to
expose an early preview of a feature, that is often incomplete, not always
in its definitive form regarding configuration, and for which developers
are seeking feedback from the users. It is even possible that changes will
be brought within the stable branch and it may happen that the feature
breaks. It is not imaginable to always be able to backport bug fixes too
far in this context since the code and configuration may change quite a
bit. Users who want to try experimental features are expected to upgrade
quickly to benefit from the improvements made to that feature.
released.
- for developers, given that the odd versions are solely used by highly
skilled users, it's easier to get advanced traces and captures, and there

934
CHANGELOG
View File

@ -1,940 +1,6 @@
ChangeLog :
===========
2026/01/07 : 3.4-dev2
- BUG/MEDIUM: mworker/listener: ambiguous use of RX_F_INHERITED with shards
- BUG/MEDIUM: http-ana: Properly detect client abort when forwarding response (v2)
- BUG/MEDIUM: stconn: Don't report abort from SC if read0 was already received
- BUG/MEDIUM: quic: Don't try to use hystart if not implemented
- CLEANUP: backend: Remove useless test on server's xprt
- CLEANUP: tcpcheck: Remove useless test on the xprt used for healthchecks
- CLEANUP: ssl-sock: Remove useless tests on connection when resuming TLS session
- REGTESTS: quic: fix a TLS stack usage
- REGTESTS: list all skipped tests including 'feature cmd' ones
- CI: github: remove openssl no-deprecated job
- CI: github: add a job to test the master branch of OpenSSL
- CI: github: openssl-master.yml misses actions/checkout
- BUG/MEDIUM: backend: Do not remove CO_FL_SESS_IDLE in assign_server()
- CI: github: use git prefix for openssl-master.yml
- BUG/MEDIUM: mux-h2: synchronize all conditions to create a new backend stream
- REGTESTS: fix error when no test are skipped
- MINOR: cpu-topo: Turn the cpu policy configuration into a struct
- MEDIUM: cpu-topo: Add a "threads-per-core" keyword to cpu-policy
- MEDIUM: cpu-topo: Add a "cpu-affinity" option
- MEDIUM: cpu-topo: Add a new "max-threads-per-group" global keyword
- MEDIUM: cpu-topo: Add the "per-thread" cpu_affinity
- MEDIUM: cpu-topo: Add the "per-ccx" cpu_affinity
- BUG/MINOR: cpu-topo: fix -Wlogical-not-parentheses build with clang
- DOC: config: fix number of values for "cpu-affinity"
- MINOR: tools: add a secure implementation of memset
- MINOR: mux-h2: add missing glitch count for non-decodable H2 headers
- MINOR: mux-h2: perform a graceful close at 75% glitches threshold
- MEDIUM: mux-h1: implement basic glitches support
- MINOR: mux-h1: perform a graceful close at 75% glitches threshold
- MEDIUM: cfgparse: acknowledge that proxy ID auto numbering starts at 2
- MINOR: cfgparse: remove useless checks on no server in backend
- OPTIM/MINOR: proxy: do not init proxy management task if unused
- MINOR: patterns: preliminary changes for reorganization
- MEDIUM: patterns: reorganize pattern reference elements
- CLEANUP: patterns: remove dead code
- OPTIM: patterns: cache the current generation
- MINOR: tcp: add new bind option "tcp-ss" to instruct the kernel to save the SYN
- MINOR: protocol: support a generic way to call getsockopt() on a connection
- MINOR: tcp: implement the get_opt() function
- MINOR: tcp_sample: implement the fc_saved_syn sample fetch function
- CLEANUP: assorted typo fixes in the code, commits and doc
- BUG/MEDIUM: cpu-topo: Don't forget to reset visited_ccx.
- BUG/MAJOR: set the correct generation ID in pat_ref_append().
- BUG/MINOR: backend: fix the conn_retries check for TFO
- BUG/MINOR: backend: inspect request not response buffer to check for TFO
- MINOR: net_helper: add sample converters to decode ethernet frames
- MINOR: net_helper: add sample converters to decode IP packet headers
- MINOR: net_helper: add sample converters to decode TCP headers
- MINOR: net_helper: add ip.fp() to build a simplified fingerprint of a SYN
- MINOR: net_helper: prepare the ip.fp() converter to support more options
- MINOR: net_helper: add an option to ip.fp() to append the TTL to the fingerprint
- MINOR: net_helper: add an option to ip.fp() to append the source address
- DOC: config: fix the length attribute name for stick tables of type binary / string
- MINOR: mworker/cli: only keep positive PIDs in proc_list
- CLEANUP: mworker: remove duplicate list.h include
- BUG/MINOR: mworker/cli: fix show proc pagination using reload counter
- MINOR: mworker/cli: extract worker "show proc" row printer
- MINOR: cpu-topo: Factorize code
- MINOR: cpu-topo: Rename variables to better fit their usage
- BUG/MEDIUM: peers: Properly handle shutdown when trying to get a line
- BUG/MEDIUM: mux-h1: Take care to update <kop> value during zero-copy forwarding
- MINOR: threads: Avoid using a thread group mask when stopping.
- MINOR: hlua: Add support for lua 5.5
- MEDIUM: cpu-topo: Add an optional directive for per-group affinity
- BUG/MEDIUM: mworker: can't use signals after a failed reload
- BUG/MEDIUM: stconn: Move data from <kip> to <kop> during zero-copy forwarding
- DOC: config: fix a few typos and refine cpu-affinity
- MINOR: receiver: Remove tgroup_mask from struct shard_info
- BUG/MINOR: quic: fix deprecated warning for window size keyword
2025/12/10 : 3.4-dev1
- BUG/MINOR: jwt: Missing "case" in switch statement
- DOC: configuration: ECH support details
- Revert "MINOR: quic: use dynamic cc_algo on bind_conf"
- MINOR: quic: define quic_cc_algo as const
- MINOR: quic: extract cc-algo parsing in a dedicated function
- MINOR: quic: implement cc-algo server keyword
- BUG/MINOR: quic-be: Missing keywords array NULL termination
- REGTESTS: ssl enable tls12_reuse.vtc for AWS-LC
- REGTESTS: ssl: split tls*_reuse in stateless and stateful resume tests
- BUG/MEDIUM: connection: fix "bc_settings_streams_limit" typo
- BUG/MEDIUM: config: ignore empty args in skipped blocks
- DOC: config: mention clearer that the cache's total-max-size is mandatory
- DOC: config: reorder the cache section's keywords
- BUG/MINOR: quic/ssl: crash in ClientHello callback ssl traces
- BUG/MINOR: quic-be: handshake errors without connection stream closure
- MINOR: quic: Add useful debugging traces in qc_idle_timer_do_rearm()
- REGTESTS: ssl: Move all the SSL certificates, keys, crt-lists inside "certs" directory
- REGTESTS: quic/ssl: ssl/del_ssl_crt-list.vtc supported by QUIC
- REGTESTS: quic: dynamic_server_ssl.vtc supported by QUIC
- REGTESTS: quic: issuers_chain_path.vtc supported by QUIC
- REGTESTS: quic: new_del_ssl_cafile.vtc supported by QUIC
- REGTESTS: quic: ocsp_auto_update.vtc supported by QUIC
- REGTESTS: quic: set_ssl_bug_2265.vtc supported by QUIC
- MINOR: quic: avoid code duplication in TLS alert callback
- BUG/MINOR: quic-be: missing connection stream closure upon TLS alert to send
- REGTESTS: quic: set_ssl_cafile.vtc supported by QUIC
- REGTESTS: quic: set_ssl_cert_noext.vtc supported by QUIC
- REGTESTS: quic: set_ssl_cert.vtc supported by QUIC
- REGTESTS: quic: set_ssl_crlfile.vtc supported by QUIC
- REGTESTS: quic: set_ssl_server_cert.vtc supported by QUIC
- REGTESTS: quic: show_ssl_ocspresponse.vtc supported by QUIC
- REGTESTS: quic: ssl_client_auth.vtc supported by QUIC
- REGTESTS: quic: ssl_client_samples.vtc supported by QUIC
- REGTESTS: quic: ssl_default_server.vtc supported by QUIC
- REGTESTS: quic: new_del_ssl_crlfile.vtc supported by QUIC
- REGTESTS: quic: ssl_frontend_samples.vtc supported by QUIC
- REGTESTS: quic: ssl_server_samples.vtc supported by QUIC
- REGTESTS: quic: ssl_simple_crt-list.vtc supported by QUIC
- REGTESTS: quic: ssl_sni_auto.vtc code provision for QUIC
- REGTESTS: quic: ssl_curve_name.vtc supported by QUIC
- REGTESTS: quic: add_ssl_crt-list.vtc supported by QUIC
- REGTESTS: add ssl_ciphersuites.vtc (TCP & QUIC)
- BUG/MINOR: quic: do not set first the default QUIC curves
- REGTESTS: quic/ssl: Add ssl_curves_selection.vtc
- BUG/MINOR: ssl: Don't allow to set NULL sni
- MEDIUM: quic: Add connection as argument when qc_new_conn() is called
- MINOR: ssl: Add a function to hash SNIs
- MINOR: ssl: Store hash of the SNI for cached TLS sessions
- MINOR: ssl: Compare hashes instead of SNIs when a session is cached
- MINOR: connection/ssl: Store the SNI hash value in the connection itself
- MEDIUM: tcpcheck/backend: Get the connection SNI before initializing SSL ctx
- BUG/MEDIUM: ssl: Don't reuse TLS session if the connection's SNI differs
- MEDIUM: ssl/server: No longer store the SNI of cached TLS sessions
- BUG/MINOR: log: Dump good %B and %U values in logs
- BUG/MEDIUM: http-ana: Don't close server connection on read0 in TUNNEL mode
- DOC: config: Fix description of the spop mode
- DOC: config: Improve spop mode documentation
- MINOR: ssl: Split ssl_crt-list_filters.vtc in two files by TLS version
- REGTESTS: quic: tls13_ssl_crt-list_filters.vtc supported by QUIC
- BUG/MEDIUM: h3: do not access QCS <sd> if not allocated
- CLEANUP: mworker/cli: remove useless variable
- BUG/MINOR: mworker/cli: 'show proc' is limited by buffer size
- BUG/MEDIUM: ssl: Always check the ALPN after handshake
- MINOR: connections: Add a new CO_FL_SSL_NO_CACHED_INFO flag
- BUG/MEDIUM: ssl: Don't store the ALPN for check connections
- BUG/MEDIUM: ssl: Don't resume session for check connections
- CLEANUP: improvements to the alignment macros
- CLEANUP: use the automatic alignment feature
- CLEANUP: more conversions and cleanups for alignment
- BUG/MEDIUM: h3: fix access to QCS <sd> definitely
- MINOR: h2/trace: emit a trace of the received RST_STREAM type
2025/11/26 : 3.4-dev0
- MINOR: version: mention that it's development again
2025/11/26 : 3.3.0
- BUG/MINOR: acme: better challenge_ready processing
- BUG/MINOR: acme: warning ctx may be used uninitialized
- MINOR: httpclient: complete the https log
- BUG/MEDIUM: server: do not use default SNI if manually set
- BUG/MINOR: freq_ctr: Prevent possible signed overflow in freq_ctr_overshoot_period
- DOC: ssl: Document the restrictions on 0RTT.
- DOC: ssl: Note that 0rtt works fork QUIC with QuicTLS too.
- BUG/MEDIUM: quic: do not prevent sending if no BE token
- BUG/MINOR: quic/server: free quic_retry_token on srv drop
- MINOR: quic: split global CID tree between FE and BE sides
- MINOR: quic: use separate global quic_conns FE/BE lists
- MINOR: quic: add "clo" filter on show quic
- MINOR: quic: dump backend connections on show quic
- MINOR: quic: mark backend conns on show quic
- BUG/MINOR: quic: fix uninit list on show quic handler
- BUG/MINOR: quic: release BE quic_conn on connect failure
- BUG/MINOR: server: fix srv_drop() crash on partially init srv
- BUG/MINOR: h3: do no crash on forwarding multiple chained response
- BUG/MINOR: h3: handle properly buf alloc failure on response forwarding
- BUG/MEDIUM: server/ssl: Unset the SNI for new server connections if none is set
- BUG/MINOR: acme: fix ha_alert() call
- Revert "BUG/MEDIUM: server/ssl: Unset the SNI for new server connections if none is set"
- BUG/MINOR: sock-inet: ignore conntrack for transparent sockets on Linux
- DEV: patchbot: prepare for new version 3.4-dev
- DOC: update INSTALL with the range of gcc compilers and openssl versions
- MINOR: version: mention that 3.3 is stable now
2025/11/21 : 3.3-dev14
- MINOR: stick-tables: Rename stksess shards to use buckets
- MINOR: quic: do not use quic_newcid_from_hash64 on BE side
- MINOR: quic: support multiple random CID generation for BE side
- MINOR: quic: try to clarify quic_conn CIDs fields direction
- MINOR: quic: refactor qc_new_conn() prototype
- MINOR: quic: remove <ipv4> arg from qc_new_conn()
- MEDIUM: mworker: set the mworker-max-reloads to 50
- BUG/MEDIUM: quic-be: prevent use of MUX for 0-RTT sessions without secrets
- CLEANUP: startup: move confusing msg variable
- BUG/MEDIUM: mworker: signals inconsistencies during startup and reload
- BUG/MINOR: mworker: wrong signals during startup
- BUG/MINOR: acme: P-256 doesn't work with openssl >= 3.0
- REGTESTS: ssl: split the SSL reuse test into TLS 1.2/1.3
- BUILD: Makefile: make install with admin tools
- CI: github: make install-bin instead of make install
- BUG/MINOR: ssl: remove dead code in ssl_sock_from_buf()
- BUG/MINOR: mux-quic: implement max-reuse server parameter
- MINOR: quic: fix trace on quic_conn_closed release
- BUG/MINOR: quic: do not decrement jobs for backend conns
- BUG/MINOR: quic: fix FD usage for quic_conn_closed on backend side
- BUILD: Makefile: remove halog from install-admin
- REGTESTS: ssl: add basic 0rtt tests for TLSv1.2, TLSv1.3 and QUIC
- REGTESTS: ssl: also verify that 0-rtt properly advertises early-data:1
- MINOR: quic/flags: add missing QUIC flags for flags dev tool.
- MINOR: quic: unneeded xprt context variable passed as parameter
- MINOR: limits: keep a copy of the rough estimate of needed FDs in global struct
- MINOR: limits: explain a bit better what to do when fd limits are exceeded
- BUG/MEDIUM: quic-be/ssl_sock: TLS callback called without connection
- BUG/MINOR: acme: alert when the map doesn't exist at startup
- DOC: acme: add details about the DNS-01 support
- DOC: acme: explain how to dump the certificates
- DOC: acme: configuring acme needs a crt file
- DOC: acme: add details about key pair generation in ACME section
- BUG/MEDIUM: queues: Don't forget to unlock the queue before exiting
- MINOR: muxes: Support an optional ALPN string when defining mux protocols
- MINOR: config: Do proto detection for listeners before checks about ALPN
- BUG/MEDIUM: config: Use the mux protocol ALPN by default for listeners if forced
- DOC: config: Add a note about conflict with ALPN/NPN settings and proto keyword
- MINOR: quic: store source address for backend conns
- BUG/MINOR: quic: flag conn with CO_FL_FDLESS on backend side
- ADMIN: dump-certs: let dry-run compare certificates
- BUG/MEDIUM: connection/ssl: also fix the ssl_sock_io_cb() regarding idle list
- DOC: http: document 413 response code
- MINOR: limits: display the computed maxconn using ha_notice()
- BUG/MEDIUM: applet: Fix conditions to detect spinning loop with the new API
- BUG/MEDIUM: cli: State the cli have no more data to deliver if it yields
- MINOR: h3: adjust sedesc update for known input payload len
- BUG/MINOR: mux-quic: fix sedesc leak on BE side
- OPTIM: mux-quic: delay FE sedesc alloc to stream creation
- BUG/MEDIUM: quic-be: quic_conn_closed buffer overflow
- BUG/MINOR: mux-quic: check access on qcs stream-endpoint
- BUG/MINOR: acme: handle multiple auth with the same name
- BUG/MINOR: acme: prevent creating map entries with dns-01
2025/11/14 : 3.3-dev13
- BUG/MEDIUM: config: for word expansion, empty or non-existing are the same
- BUG/MINOR: quic: close connection on CID alloc failure
- MINOR: quic: adjust CID conn tree alloc in qc_new_conn()
- MINOR: quic: split CID alloc/generation function
- BUG/MEDIUM: quic: handle collision on CID generation
- MINOR: quic: extend traces on CID allocation
- MEDIUM/OPTIM: quic: alloc quic_conn after CID collision check
- MINOR: stats-proxy: ensure future-proof FN_AGE manipulation in me_generate_field()
- BUG/MEDIUM: stats-file: fix shm-stats-file preload not working anymore
- BUG/MINOR: do not account backend connections into maxconn
- BUG/MEDIUM: init: 'devnullfd' not properly closed for master
- BUG/MINOR: acme: more explicit error when BIO_new_file()
- BUG/MEDIUM: quic-be: do not launch the connection migration process
- MINOR: quic-be: Parse the NEW_TOKEN frame
- MEDIUM: quic-be: Parse, store and reuse tokens provided by NEW_TOKEN
- MINOR: quic-be: helper functions to save/restore transport params (0-RTT)
- MINOR: quic-be: helper quic_reuse_srv_params() function to reuse server params (0-RTT)
- MINOR: quic-be: Save the backend 0-RTT parameters
- MEDIUM: quic-be: modify ssl_sock_srv_try_reuse_sess() to reuse backend sessions (0-RTT)
- MINOR: quic-be: allow the preparation of 0-RTT packets
- MINOR: quic-be: Send post handshake frames from list of frames (0-RTT)
- MEDIUM: quic-be: qc_send_mux() adaptation for 0-RTT
- MINOR: quic-be: discard the 0-RTT keys
- MEDIUM: quic-be: enable the use of 0-RTT
- MINOR: quic-be: validate the 0-RTT transport parameters
- MINOR: quic-be: do not create the mux after handshake completion (for 0-RTT)
- MINOR: quic-be: avoid a useless I/O callback wakeup for 0-RTT sessions
- BUG/MEDIUM: acme: move from mt_list to a rwlock + ebmbtree
- BUG/MINOR: acme: can't override the default resolver
- MINOR: ssl/sample: expose ssl_*c_curve for AWS-LC
- MINOR: check: delay MUX init when SSL ALPN is used
- MINOR: cfgdiag: adjust diag on servers
- BUG/MINOR: check: only try connection reuse for http-check rulesets
- BUG/MINOR: check: fix reuse-pool if MUX inherited from server
- MINOR: check: clarify check-reuse-pool interaction with reuse policy
- DOC: configuration: add missing ssllib_name_startswith()
- DOC: configuration: add missing openssl_version predicates
- MINOR: cfgcond: add "awslc_api_atleast" and "awslc_api_before"
- REGTESTS: ssl: activate ssl_curve_name.vtc for AWS-LC
- BUILD: ech: fix clang warnings
- BUG/MEDIUM: stick-tables: Always return the good stksess from stktable_set_entry
- BUG/MINOR: stick-tables: Fix return value for __stksess_kill()
- CLEANUP: stick-tables: Don't needlessly compute shard number in stksess_free()
- MINOR: h1: h1_release() should return if it destroyed the connection
- BUG/MEDIUM: h1: prevent a crash on HTTP/2 upgrade
- MINOR: check: use auto SNI for QUIC checks
- MINOR: check: ensure QUIC checks configuration coherency
- CLEANUP: peers: remove an unneeded null check
- Revert "BUG/MEDIUM: connections: permit to permanently remove an idle conn"
- BUG/MEDIUM: connection: do not reinsert a purgeable conn in idle list
- DEBUG: extend DEBUG_STRESS to ease testing and turn on extra checks
- DEBUG: add BUG_ON_STRESS(): a BUG_ON() implemented only when DEBUG_STRESS > 0
- DEBUG: servers: add a few checks for stress-testing idle conns
- BUG/MINOR: check: fix QUIC check test when QUIC disabled
- BUG/MINOR: quic-be: missing version negotiation
- CLEANUP: quic: Missing successful SSL handshake backend trace (OpenSSL 3.5)
- BUG/MINOR: quic-be: backend SSL session reuse fix (OpenSSL 3.5)
- REGTEST: quic: quic/ssl_reuse.vtc supports OpenSSL 3.5 QUIC API
2025/11/08 : 3.3-dev12
- MINOR: quic: enable SSL on QUIC servers automatically
- MINOR: quic: reject conf with QUIC servers if not compiled
- OPTIM: quic: adjust automatic ALPN setting for QUIC servers
- MINOR: sample: optional AAD parameter support to aes_gcm_enc/dec
- REGTESTS: converters: check USE_OPENSSL in aes_gcm.vtc
- BUG/MINOR: resolvers: ensure fair round robin iteration
- BUG/MAJOR: stats-file: fix crash on non-x86 platform caused by unaligned cast
- OPTIM: backend: skip conn reuse for incompatible proxies
- SCRIPTS: build-ssl: allow to build a FIPS version without FIPS
- OPTIM: proxy: move atomically access fields out of the read-only ones
- SCRIPTS: build-ssl: fix rpath in AWS-LC install for openssl and bssl bin
- CI: github: update to macos-26
- BUG/MINOR: quic: fix crash on client handshake abort
- MINOR: quic: do not set conn member if ssl_sock_ctx
- MINOR: quic: remove connection arg from qc_new_conn()
- BUG/MEDIUM: server: Add a rwlock to path parameter
- BUG/MEDIUM: server: Also call srv_reset_path_parameters() on srv up
- BUG/MEDIUM: mux-h1: fix 414 / 431 status code reporting
- BUG/MEDIUM: mux-h2: make sure not to move a dead connection to idle
- BUG/MEDIUM: connections: permit to permanently remove an idle conn
- MEDIUM: cfgparse: deprecate 'master-worker' keyword alone
- MEDIUM: cfgparse: 'daemon' not compatible with -Ws
- DOC: configuration: deprecate the master-worker keyword
- MINOR: quic: remove <mux_state> field
- BUG/MEDIUM: stick-tables: Make sure we handle expiration on all tables
- MEDIUM: stick-tables: Optimize the expiration process a bit.
- MEDIUM: ssl/ckch: use ckch_store instead of ckch_data for ckch_conf_kws
- MINOR: acme: generate a temporary key pair
- MEDIUM: acme: generate a key pair when no file are available
- BUILD: ssl/ckch: wrong function name in ckch_conf_kws
- BUILD: acme: acme_gen_tmp_x509() signedness and unused variables
- BUG/MINOR: acme: fix initialization issue in acme_gen_tmp_x509()
- BUILD: ssl/ckch: fix ckch_conf_kws parsing without ACME
- MINOR: server: move the lock inside srv_add_idle()
- DOC: acme: crt-store allows you to start without a certificate
- BUG/MINOR: acme: allow 'key' when generating cert
- MINOR: stconn: Add counters to SC to know number of bytes received and sent
- MINOR: stream: Add samples to get number of bytes received or sent on each side
- MINOR: counters: Add req_in/req_out/res_in/res_out counters for fe/be/srv/li
- MINOR: stream: Remove bytes_in and bytes_out counters from stream
- MINOR: counters: Remove bytes_in and bytes_out counter from fe/be/srv/li
- MINOR: stats: Add stats about request and response bytes received and sent
- MINOR: applet: Add function to get amount of data in the output buffer
- MINOR: channel: Remove total field from channels
- DEBUG: stream: Add bytes_in/bytes_out value for both SC in session dump
- MEDIUM: stktables: Limit the number of stick counters to 100
- BUG/MINOR: config: Limit "tune.maxpollevents" parameter to 1000000
- BUG/MEDIUM: server: close a race around ready_srv when deleting a server
- BUG/MINOR: config: emit warning for empty args when *not* in discovery mode
- BUG/MEDIUM: config: solve the empty argument problem again
- MEDIUM: config: now reject configs with empty arguments
- MINOR: tools: add support for ist to the word fingerprinting functions
- MINOR: tools: add env_suggest() to suggest alternate variable names
- MINOR: tools: have parse_line's error pointer point to unknown variable names
- MINOR: cfgparse: try to suggest correct variable names on errors
- IMPORT: cebtree: Replace offset calculation with offsetof to avoid UB
- BUG/MINOR: acme: wrong dns-01 challenge in the log
- MEDIUM: backend: Defer conn_xprt_start() after mux creation
- MINOR: peers: Improve traces for peers
- MEDIUM: peers: No longer ack updates during a full resync
- MEDIUM: peers: Remove commitupdate field on stick-tables
- BUG/MEDIUM: peers: Fix update message parsing during a full resync
- MINOR: sample/stats: Add "bytes" in req_{in,out} and res_{in,out} names
- BUG/MEDIUM: stick-tables: Make sure updates are seen as local
- BUG/MEDIUM: proxy: use aligned allocations for struct proxy
- BUG/MEDIUM: proxy: use aligned allocations for struct proxy_per_tgroup
- BUG/MINOR: acme: avoid a possible crash on error paths
2025/10/31 : 3.3-dev11
- BUG/MEDIUM: mt_list: Make sure not to unlock the element twice
- BUG/MINOR: quic-be: unchecked connections during handshakes
- BUG/MEDIUM: cli: also free the trash chunk on the error path
- MINOR: initcalls: Add a new initcall stage, STG_INIT_2
- MEDIUM: stick-tables: Use a per-shard expiration task
- MEDIUM: stick-tables: Remove the table lock
- MEDIUM: stick-tables: Stop if stktable_trash_oldest() fails.
- MEDIUM: stick-tables: Stop as soon as stktable_trash_oldest succeeds.
- BUG/MEDIUM: h1-htx: Don't set HTX_FL_EOM flag on 1xx informational messages
- BUG/MEDIUM: h3: properly encode response after interim one in same buf
- BUG/MAJOR: pools: fix default pool alignment
- MINOR: ncbuf: extract common types
- MINOR: ncbmbuf: define new ncbmbuf type
- MINOR: ncbmbuf: implement add
- MINOR: ncbmbuf: implement iterator bitmap utilities functions
- MINOR: ncbmbuf: implement ncbmb_data()
- MINOR: ncbmbuf: implement advance operation
- MINOR: ncbmbuf: add tests as standalone mode
- BUG/MAJOR: quic: use ncbmbuf for CRYPTO handling
- MINOR: quic: remove received CRYPTO temporary tree storage
- MINOR: stats-file: fix typo in shm-stats-file object struct size detection
- MINOR: compiler: add FIXED_SIZE(size, type, name) macro
- MEDIUM: freq-ctr: use explicit-size types for freq-ctr struct
- BUG/MAJOR: stats-file: ensure shm_stats_file_object struct mapping consistency
- BUG/MEDIUM: build: limit excessive and counter-productive gcc-15 vectorization
- BUG/MEDIUM: stick-tables: Don't loop if there's nothing left
- MINOR: acme: add the dns-01-record field to the sink
- MINOR: acme: display the complete challenge_ready command in the logs
- BUG/MEDIUM: mt_lists: Avoid el->prev = el->next = el
- MINOR: quic: remove unused conn-tx-buffers limit keyword
- MINOR: quic: prepare support for options on FE/BE side
- MINOR: quic: rename "no-quic" to "tune.quic.listen"
- MINOR: quic: duplicate glitches FE option on BE side
- MINOR: quic: split congestion controler options for FE/BE usage
- MINOR: quic: split Tx options for FE/BE usage
- MINOR: quic: rename max Tx mem setting
- MINOR: quic: rename retry-threshold setting
- MINOR: quic: rename frontend sock-per-conn setting
- BUG/MINOR: quic: split max-idle-timeout option for FE/BE usage
- BUG/MINOR: quic: split option for congestion max window size
- BUG/MINOR: quic: rename and duplicate stream settings
- BUG/MEDIUM: applet: Improve again spinning loops detection with the new API
- Revert "BUG/MAJOR: stats-file: ensure shm_stats_file_object struct mapping consistency"
- Revert "MEDIUM: freq-ctr: use explicit-size types for freq-ctr struct"
- Revert "MINOR: compiler: add FIXED_SIZE(size, type, name) macro"
- BUG/MAJOR: stats-file: ensure shm_stats_file_object struct mapping consistency (2nd attempt)
- BUG/MINOR: stick-tables: properly index string-type keys
- BUILD: openssl-compat: fix build failure with OPENSSL=0 and KTLS=1
- BUG/MEDIUM: mt_list: Use atomic operations to prevent compiler optims
- MEDIUM: quic: Fix build with openssl-compat
- MINOR: applet: do not put SE_FL_WANT_ROOM on rcv_buf() if the channel is empty
- MINOR: cli: create cli_raw_rcv_buf() from the generic applet_raw_rcv_buf()
- BUG/MEDIUM: cli: do not return ACKs one char at a time
- BUG/MEDIUM: ssl: Crash because of dangling ckch_store reference in a ckch instance
- BUG/MINOR: ssl: Remove unreachable code in CLI function
- BUG/MINOR: acl: warn if "_sub" derivative used with an explicit match
- DOC: config: fix confusing typo about ACL -m ("now" vs "not")
- DOC: config: slightly clarify the ssl_fc_has_early() behavior
- MINOR: ssl-sample: add ssl_fc_early_rcvd() to detect use of early data
- CI: disable fail-fast on fedora rawhide builds
- MINOR: http: fix 405,431,501 default errorfile
- BUG/MINOR: init: Do not close previously created fd in stdio_quiet
- MINOR: init: Make devnullfd global and create it earlier in init
- MINOR: init: Use devnullfd in stdio_quiet calls instead of recreating a fd everytime
- MEDIUM: ssl: Add certificate password callback that calls external command
- MEDIUM: ssl: Add local passphrase cache
- MINOR: ssl: Do not dump decrypted privkeys in 'dump ssl cert'
- BUG/MINOR: resolvers: Apply dns-accept-family setting on additional records
- MEDIUM: h1: Immediately try to read data for frontend
- REGTEST: quic: add ssl_reuse.vtc new QUIC test
- BUG/MINOR: ssl: returns when SSL_CTX_new failed during init
- MEDIUM: ssl/ech: config and load keys
- MINOR: ssl/ech: add logging and sample fetches for ECH status and outer SNI
- MINOR: listener: implement bind_conf_find_by_name()
- MINOR: ssl/ech: key management via stats socket
- CI: github: add USE_ECH=1 to haproxy for openssl-ech job
- DOC: configuration: "ech" for bind lines
- BUG/MINOR: ech: non destructive parsing in cli_find_ech_specific_ctx()
- DOC: management: document ECH CLI commands
- MEDIUM: mux-h2: do not needlessly refrain from sending data early
- MINOR: mux-h2: extract the code to send preface+settings into its own function
- BUG/MINOR: mux-h2: send the preface along with the first request if needed
2025/10/18 : 3.3-dev10
- BUG/MEDIUM: connections: Only avoid creating a mux if we have one
- BUG/MINOR: sink: retry attempt for sft server may never occur
- CLEANUP: mjson: remove MJSON_ENABLE_RPC code
- CLEANUP: mjson: remove MJSON_ENABLE_PRINT code
- CLEANUP: mjson: remove MJSON_ENABLE_NEXT code
- CLEANUP: mjson: remove MJSON_ENABLE_BASE64 code
- CLEANUP: mjson: remove unused defines and math.h
- BUG/MINOR: http-ana: Reset analyse_exp date after 'wait-for-body' action
- CLEANUP: mjson: remove unused defines from mjson.h
- BUG/MINOR: acme: avoid overflow when diff > notAfter
- DEV: patchbot: use git reset+checkout instead of pull
- MINOR: proxy: explicitly permit abortonclose on frontends and clarify the doc
- REGTESTS: fix h2_desync_attacks to wait for the response
- REGTESTS: http-messaging: fix the websocket and upgrade tests not to close early
- MINOR: proxy: only check abortonclose through a dedicated function
- MAJOR: proxy: enable abortonclose by default on HTTP proxies
- MINOR: proxy: introduce proxy_abrt_close_def() to pass the desired default
- MAJOR: proxy: enable abortonclose by default on TLS listeners
- MINOR: h3/qmux: Set QC_SF_UNKNOWN_PL_LENGTH flag on QCS when headers are sent
- MINOR: stconn: Add two fields in sedesc to replace the HTX extra value
- MINOR: h1-htx: Increment body len when parsing a payload with no xfer length
- MINOR: mux-h1: Set known input payload length during demux
- MINOR: mux-fcgi: Set known input payload length during demux
- MINOR: mux-h2: Use <body_len> H2S field for payload without content-length
- MINOR: mux-h2: Set known input payload length of the sedesc
- MINOR: h3: Set known input payload length of the sedesc
- MINOR: stconn: Move data from kip to kop when data are sent to the consumer
- MINOR: filters: Reset known input payload length if a data filter is used
- MINOR: hlua/http-fetch: Use <kip> instead of HTX extra field to get body size
- MINOR: cache: Use the <kip> value to check too big objects
- MINOR: compression: Use the <kip> value to check body size
- MEDIUM: mux-h1: Stop to use HTX extra value when formatting message
- MEDIUM: htx: Remove the HTX extra field
- MEDIUM: acme: don't insert acme account key in ckchs_tree
- BUG/MINOR: acme: memory leak from the config parser
- CI: cirrus-ci: bump FreeBSD image to 14-3
- BUG/MEDIUM: ssl: take care of second client hello
- BUG/MINOR: ssl: always clear the remains of the first hello for the second one
- BUG/MEDIUM: stconn: Properly forward kip to the opposite SE descriptor
- MEDIUM: applet: Forward <kip> to applets
- DEBUG: mux-h1: Dump <kip> and <kop> values with sedesc info
- BUG/MINOR: ssl: leak in ssl-f-use
- BUG/MINOR: ssl: leak crtlist_name in ssl-f-use
- BUILD: makefile: disable tail calls optimizations with memory profiling
- BUG/MEDIUM: applet: Improve spinning loop detection with the new API
- BUG/MINOR: ssl: Free global_ssl structure contents during deinit
- BUG/MINOR: ssl: Free key_base from global_ssl structure during deinit
- MEDIUM: jwt: Remove certificate support in jwt_verify converter
- MINOR: jwt: Add new jwt_verify_cert converter
- MINOR: jwt: Do not look into ckch_store for jwt_verify converter
- MINOR: jwt: Add new "jwt" certificate option
- MINOR: jwt: Add specific error code for known but unavailable certificate
- DOC: jwt: Add doc about "jwt_verify_cert" converter
- MINOR: ssl: Dump options in "show ssl cert"
- MINOR: jwt: Add new "add/del/show ssl jwt" CLI commands
- REGTEST: jwt: Test new CLI commands
- BUG/MINOR: ssl: Potential NULL deref in trace macro
- MINOR: regex: use a thread-local match pointer for pcre2
- BUG/MEDIUM: pools: fix bad freeing of aligned pools in UAF mode
- MEDIUM: pools: detect() when munmap() fails in UAF mode
- TESTS: quic: useless param for b_quic_dec_int()
- BUG/MEDIUM: pools: fix crash on filtered "show pools" output
- BUG/MINOR: pools: don't report "limited to the first X entries" by default
- BUG/MAJOR: lb-chash: fix key calculation when using default hash-key id
- BUG/MEDIUM: stick-tables: Don't forget to dec count on failure.
- BUG/MINOR: quic: check applet_putchk() for 'show quic' first line
- TESTS: quic: fix uninit of quic_cc_path const member
- BUILD: ssl: can't build when using -DLISTEN_DEFAULT_CIPHERS
- BUG/MAJOR: quic: uninitialized quic_conn_closed struct members
- BUG/MAJOR: quic: do not reset QUIC backends fds in closing state
- BUG/MINOR: quic: SSL counters not handled
- DOC: clarify the experimental status for certain features
- MINOR: config: remove experimental status on tune.disable-fast-forward
- MINOR: tree-wide: add missing TAINTED flags for some experimental directives
- MEDIUM: config: warn when expose-experimental-directives is used for no reason
- BUG/MEDIUM: threads/config: drop absent threads from thread groups
- REGTESTS: remove experimental from quic/retry.vtc
2025/10/03 : 3.3-dev9
- BUG/MINOR: acl: Fix error message about several '-m' parameters
- MINOR: server: Parse sni and pool-conn-name expressions in a dedicated function
- BUG/MEDIUM: server: Use sni as pool connection name for SSL server only
- BUG/MINOR: server: Update healthcheck when server settings are changed via CLI
- OPTIM: backend: Don't set SNI for non-ssl connections
- OPTIM: proto_rhttp: Don't set SNI for non-ssl connections
- OPTIM: tcpcheck: Don't set SNI and ALPN for non-ssl connections
- BUG/MINOR: tcpcheck: Don't use sni as pool-conn-name for non-SSL connections
- MEDIUM: server/ssl: Base the SNI value to the HTTP host header by default
- MEDIUM: httpcheck/ssl: Base the SNI value on the HTTP host header by default
- OPTIM: tcpcheck: Reorder tcpcheck_connect structure fields to fill holes
- REGTESTS: ssl: Add a script to test the automatic SNI selection
- MINOR: quic: add useful trace about padding params values
- BUG/MINOR: quic: too short PADDING frame for too short packets
- BUG/MINOR: cpu_topo: work around a small bug in musl's CPU_ISSET()
- BUG/MEDIUM: ssl: Properly initialize msg_controllen.
- MINOR: quic: SSL session reuse for QUIC
- BUG/MEDIUM: proxy: fix crash with stop_proxy() called during init
- MINOR: stats-file: use explicit unsigned integer bitshift for user slots
- CLEANUP: quic: fix typo in quic_tx trace
- TESTS: quic: add unit-tests for QUIC TX part
- MINOR: quic: restore QUIC_HP_SAMPLE_LEN constant
- REGTESTS: ssl: Fix the script about automatic SNI selection
- BUG/MINOR: pools: Fix the dump of pools info to deal with buffers limitations
- MINOR: pools: Don't dump anymore info about pools when purge is forced
- BUG/MINOR: quic: properly support GSO on backend side
- BUG/MEDIUM: mux-h2: Reset MUX blocking flags when a send error is caught
- BUG/MEDIUM: mux-h2: Don't block receives in H2_CS_ERROR and H2_CS_ERROR2 states
- BUG/MEDIUM: mux-h2: Restart reading when mbuf ring is no longer full
- BUG/MINOR: mux-h2: Remove H2_CF_DEM_DFULL flags when the demux buffer is reset
- BUG/MEDIUM: mux-h2: Report RST/error to app-layer stream during 0-copy fwding
- BUG/MEDIUM: mux-h2: Reinforce conditions to report an error to app-layer stream
- BUG/MINOR: hq-interop: adjust parsing/encoding on backend side
- OPTIM: check: do not delay MUX for ALPN if SSL not active
- BUG/MEDIUM: checks: fix ALPN inheritance from server
- BUG/MINOR: check: ensure checks are compatible with QUIC servers
- MINOR: check: reject invalid check config on a QUIC server
- MINOR: debug: report the process id in warnings and panics
- DEBUG: stream: count the number of passes in the connect loop
- MINOR: debug: report the number of loops and ctxsw for each thread
- MINOR: debug: report the time since last wakeup and call
- DEBUG: peers: export functions that use locks
- MINOR: stick-table: permit stksess_new() to temporarily allocate more entries
- MEDIUM: stick-tables: relax stktable_trash_oldest() to only purge what is needed
- MEDIUM: stick-tables: give up on lock contention in process_table_expire()
- MEDIUM: stick-tables: don't wait indefinitely in stktable_add_pend_updates()
- MEDIUM: peers: don't even try to process updates under contention
- BUG/MEDIUM: h1: Allow reception if we have early data
- BUG/MEDIUM: ssl: create the mux immediately on early data
- MINOR: ssl: Add a flag to let it be known we have an ALPN negotiated
- MINOR: ssl: Use the new flag to know when the ALPN has been set.
- MEDIUM: server: Introduce the concept of path parameters
- CLEANUP: backend: clarify the role of the init_mux variable in connect_server()
- CLEANUP: backend: invert the condition to start the mux in connect_server()
- CLEANUP: backend: simplify the complex ifdef related to 0RTT in connect_server()
- CLEANUP: backend: clarify the cases where we want to use early data
- MEDIUM: server: Make use of the stored ALPN stored in the server
- BUILD: ssl: address a recent build warning when QUIC is enabled
- BUG/MINOR: activity: fix reporting of task latency
- MINOR: activity: indicate the number of calls on "show tasks"
- MINOR: tools: don't emit "+0" for symbol names which exactly match known ones
- BUG/MEDIUM: stick-tables: don't loop on non-expirable entries
- DEBUG: stick-tables: export stktable_add_pend_updates() for better reporting
- BUG/MEDIUM: ssl: Fix a crash when using QUIC
- BUG/MEDIUM: ssl: Fix a crash if we failed to create the mux
- MEDIUM: dns: bind the nameserver sockets to the initiating thread
- MEDIUM: resolvers: make the process_resolvers() task single-threaded
- BUG/MINOR: stick-table: make sure never to miss a process_table_expire update
- MEDIUM: stick-table: move process_table_expire() to a single thread
- MEDIUM: peers: move process_peer_sync() to a single thread
- BUG/MAJOR: stream: Force channel analysis on successful synchronous send
- MINOR: quic: get rid of ->target quic_conn struct member
- MINOR: quic-be: make SSL/QUIC objects use their own indexes (ssl_qc_app_data_index)
- MINOR: quic: display build warning for compat layer on recent OpenSSL
- DOC: quic: clarifies limited-quic support
- BUG/MINOR: acme: null pointer dereference upon allocation failure
- BUG/MEDIUM: jws: return size_t in JWS functions
- BUG/MINOR: ssl: Potential NULL deref in trace macro
- BUG/MINOR: ssl: Fix potential NULL deref in trace callback
- BUG/MINOR: ocsp: prototype inconsistency
- MINOR: ocsp: put internal functions as static ones
- MINOR: ssl: set functions as static when no protypes in the .h
- BUILD: ssl: functions defined but not used
- BUG/MEDIUM: resolvers: Properly cache do-resolv resolution
- BUG/MINOR: resolvers: Restore round-robin selection on records in DNS answers
- MINOR: activity: don't report the lat_tot column for show profiling tasks
- MINOR: activity: add a new lkw_avg column to show profiling stats
- MINOR: activity: collect time spent waiting on a lock for each task
- MINOR: thread: add a lock level information in the thread_ctx
- MINOR: activity: add a new lkd_avg column to show profiling stats
- MINOR: activity: collect time spent with a lock held for each task
- MINOR: activity: add a new mem_avg column to show profiling stats
- MINOR: activity: collect CPU time spent on memory allocations for each task
- MINOR: activity/memory: count allocations performed under a lock
- DOC: proxy-protocol: Add TLS group and sig scheme TLVs
- BUG/MEDIUM: resolvers: Test for empty tree when getting a record from DNS answer
- BUG/MEDIUM: resolvers: Make resolution owns its hostname_dn value
- BUG/MEDIUM: resolvers: Accept to create resolution without hostname
- BUG/MEDIUM: resolvers: Wake resolver task up when unlinking a stream requester
- BUG/MINOR: ocsp: Crash when updating CA during ocsp updates
- Revert "BUG/MINOR: ocsp: Crash when updating CA during ocsp updates"
- BUG/MEDIUM: http_ana: fix potential NULL deref in http_process_req_common()
- MEDIUM: log/proxy: store log-steps selection using a bitmask, not an eb tree
- BUG/MINOR: ocsp: Crash when updating CA during ocsp updates
- BUG/MINOR: resolvers: always normalize FQDN from response
- BUILD: makefile: implement support for running a command in range
- IMPORT: cebtree: import version 0.5.0 to support duplicates
- MEDIUM: migrate the patterns reference to cebs_tree
- MEDIUM: guid: switch guid to more compact cebuis_tree
- MEDIUM: server: switch addr_node to cebis_tree
- MEDIUM: server: switch conf.name to cebis_tree
- MEDIUM: server: switch the host_dn member to cebis_tree
- MEDIUM: proxy: switch conf.name to cebis_tree
- MEDIUM: stktable: index table names using compact trees
- MINOR: proxy: add proxy_get_next_id() to find next free proxy ID
- MINOR: listener: add listener_get_next_id() to find next free listener ID
- MINOR: server: add server_get_next_id() to find next free server ID
- CLEANUP: server: use server_find_by_id() when looking for already used IDs
- MINOR: server: add server_index_id() to index a server by its ID
- MINOR: listener: add listener_index_id() to index a listener by its ID
- MINOR: proxy: add proxy_index_id() to index a proxy by its ID
- MEDIUM: proxy: index proxy ID using compact trees
- MEDIUM: listener: index listener ID using compact trees
- MEDIUM: server: index server ID using compact trees
- CLEANUP: server: slightly reorder fields in the struct to plug holes
- CLEANUP: proxy: slightly reorganize fields to plug some holes
- CLEANUP: backend: factor the connection lookup loop
- CLEANUP: server: use eb64_entry() not ebmb_entry() to convert an eb64
- MINOR: server: pass the server and thread to srv_migrate_conns_to_remove()
- CLEANUP: backend: use a single variable for removed in srv_cleanup_idle_conns()
- MINOR: connection: pass the thread number to conn_delete_from_tree()
- MEDIUM: connection: move idle connection trees to ceb64
- MEDIUM: connection: reintegrate conn_hash_node into connection
- CLEANUP: tools: use the item API for the file names tree
- CLEANUP: vars: use the item API for the variables trees
- BUG/MEDIUM: pattern: fix possible infinite loops on deletion
- CI: scripts: add support for git in openssl builds
- CI: github: add an OpenSSL + ECH job
- CI: scripts: mkdir BUILDSSL_TMPDIR
- Revert "BUG/MEDIUM: pattern: fix possible infinite loops on deletion"
- BUG/MEDIUM: pattern: fix possible infinite loops on deletion (try 2)
- CLEANUP: log: remove deadcode in px_parse_log_steps()
- MINOR: counters: document that tg shared counters are tied to shm-stats-file mapping
- DOC: internals: document the shm-stats-file format/mapping
- IMPORT: ebtree: delete unusable ebpttree.c
- IMPORT: eb32/eb64: reorder the lookup loop for modern CPUs
- IMPORT: eb32/eb64: use a more parallelizable check for lack of common bits
- IMPORT: eb32: drop the now useless node_bit variable
- IMPORT: eb32/eb64: place an unlikely() on the leaf test
- IMPORT: ebmb: optimize the lookup for modern CPUs
- IMPORT: eb32/64: optimize insert for modern CPUs
- IMPORT: ebtree: only use __builtin_prefetch() when supported
- IMPORT: ebst: use prefetching in lookup() and insert()
- IMPORT: ebtree: Fix UB from clz(0)
- IMPORT: ebtree: add a definition of offsetof()
- IMPORT: ebtree: replace hand-rolled offsetof to avoid UB
- MINOR: listener: add the "cc" bind keyword to set the TCP congestion controller
- MINOR: server: add the "cc" keyword to set the TCP congestion controller
- BUG/MEDIUM: ring: invert the length check to avoid an int overflow
- MINOR: trace: don't call strlen() on the thread-id numeric encoding
- MINOR: trace: don't call strlen() on the function's name
- OPTIM: sink: reduce contention on sink_announce_dropped()
- OPTIM: sink: don't waste time calling sink_announce_dropped() if busy
- CLEANUP: ring: rearrange the wait loop in ring_write()
- OPTIM: ring: always relax in the ring lock and leader wait loop
- OPTIM: ring: check the queue's owner using a CAS on x86
- OPTIM: ring: avoid reloading the tail_ofs value before the CAS in ring_write()
- BUG/MEDIUM: sink: fix unexpected double postinit of sink backend
- MEDIUM: stats: consider that shared stats pointers may be NULL
- BUG/MEDIUM: http-client: Fix the test on the response start-line
- MINOR: acme: acme-vars allow to pass data to the dpapi sink
- MINOR: acme: check acme-vars allocation during escaping
- BUG/MINOR: acme/cli: wrong description for "acme challenge_ready"
- CI: move VTest preparation & friends to dedicated composite action
- BUG/MEDIUM: stick-tables: Don't let table_process_entry() handle refcnt
- BUG/MINOR: compression: Test payload size only if content-length is specified
- BUG/MINOR: pattern: Properly flag virtual maps as using samples
- BUG/MINOR: acme: possible overflow on scheduling computation
- BUG/MINOR: acme: possible overflow in acme_will_expire()
- CLEANUP: acme: acme_will_expire() uses acme_schedule_date()
- BUG/MINOR: pattern: Fix pattern lookup for map with opt@ prefix
- CI: scripts: build curl with ECH support
- CI: github: add curl+ech build into openssl-ech job
- BUG/MEDIUM: ssl: ca-file directory mode must read every certificates of a file
- MINOR: acme: provider-name for dpapi sink
- BUILD: acme: fix false positive null pointer dereference
- MINOR: backend: srv_queue helper
- MINOR: backend: srv_is_up converter
- BUILD: halog: misleading indentation in halog.c
- CI: github: build halog on the vtest job
- BUG/MINOR: acme: don't unlink from acme_ctx_destroy()
- BUG/MEDIUM: acme: cfg_postsection_acme() don't init correctly acme sections
- MINOR: acme: implement "reuse-key" option
- ADMIN: haproxy-dump-certs: implement a certificate dumper
- ADMIN: dump-certs: don't update the file if it's up to date
- ADMIN: dump-certs: create files in a tmpdir
- ADMIN: dump-certs: fix lack of / in -p
- ADMIN: dump-certs: use same error format as haproxy
- ADMIN: reload: add a synchronous reload helper
- BUG/MEDIUM: acme: free() of i2d_X509_REQ() with AWS-LC
- ADMIN: reload: introduce verbose and silent mode
- ADMIN: reload: introduce -vv mode
- MINOR: mt_list: Implement MT_LIST_POP_LOCKED()
- BUG/MEDIUM: stick-tables: Make sure not to free a pending entry
- MINOR: sched: let's permit to share the local ctx between threads
- MINOR: sched: pass the thread number to is_sched_alive()
- BUG/MEDIUM: wdt: improve stuck task detection accuracy
- MINOR: ssl: add the ssl_bc_sni sample fetch function to retrieve backend SNI
- MINOR: rawsock: introduce CO_RFL_TRY_HARDER to detect closures on complete reads
- MEDIUM: ssl: don't always process pending handshakes on closed connections
- MEDIUM: servers: Schedule the server requeue target on creation
- MEDIUM: fwlc: Make it so fwlc_srv_reposition works with unqueued srv
- BUG/MEDIUM: fwlc: Handle memory allocation failures.
- DOC: config: clarify some known limitations of the json_query() converter
- BUG/CRITICAL: mjson: fix possible DoS when parsing numbers
- BUG/MINOR: h2: forbid 'Z' as well in header field names checks
- BUG/MINOR: h3: forbid 'Z' as well in header field names checks
- BUG/MEDIUM: resolvers: break an infinite loop in resolv_get_ip_from_response()
2025/09/05 : 3.3-dev8
- BUG/MEDIUM: mux-h2: fix crash on idle-ping due to unwanted ABORT_NOW
- BUG/MINOR: quic-be: missing Initial packet number space discarding
- BUG/MEDIUM: quic-be: crash after backend CID allocation failures
- BUG/MEDIUM: ssl: apply ssl-f-use on every "ssl" bind
- BUG/MAJOR: stream: Remove READ/WRITE events on channels after analysers eval
- MINOR: dns: dns_connect_nameserver: fix fd leak at error path
- BUG/MEDIUM: quic: reset padding when building GSO datagrams
- BUG/MINOR: quic: do not emit probe data if CONNECTION_CLOSE requested
- BUG/MAJOR: quic: fix INITIAL padding with probing packet only
- BUG/MINOR: quic: don't coalesce probing and ACK packet of same type
- MINOR: quic: centralize padding for HP sampling on packet building
- MINOR: http_ana: fix typo in http_res_get_intercept_rule
- BUG/MEDIUM: http_ana: handle yield for "stats http-request" evaluation
- MINOR: applet: Rely on applet flag to detect the new api
- MINOR: applet: Add function to test applet flags from the appctx
- MINOR: applet: Add a flag to know an applet is using HTX buffers
- MINOR: applet: Make some applet functions HTX aware
- MEDIUM: applet: Set .rcv_buf and .snd_buf functions on default ones if not set
- BUG/MEDIUM: mux-spop: Reject connection attempts from a non-spop frontend
- REGTESTS: jwt: create dynamically "cert.ecdsa.pem"
- BUG/MEDIUM: spoe: Improve error detection in SPOE applet on client abort
- MINOR: haproxy: abort config parsing on fatal errors for post parsing hooks
- MEDIUM: server: split srv_init() in srv_preinit() + srv_postinit()
- MINOR: proxy: handle shared listener counters preparation from proxy_postcheck()
- DOC: configuration: reword 'generate-certificates'
- BUG/MEDIUM: quic-be: avoid crashes when releasing Initial pktns
- BUG/MINOR: quic: reorder fragmented RX CRYPTO frames by their offsets
- MINOR: ssl: diagnostic warning when both 'default-crt' and 'strict-sni' are used
- MEDIUM: ssl: convert diag to warning for strict-sni + default-crt
- DOC: configuration: clarify 'default-crt' and implicit default certificates
- MINOR: quic: remove ->offset qf_crypto struct field
- BUG/MINOR: mux-quic: trace with non initialized qcc
- BUG/MINOR: acl: set arg_list->kw to aclkw->kw string literal if aclkw is found
- BUG/MEDIUM: mworker: fix startup and reload on macOS
- BUG/MINOR: connection: rearrange union list members
- BUG/MINOR: connection: remove extra session_unown_conn() on reverse
- MINOR: cli: display failure reason on wait command
- BUG/MINOR: server: decrement session idle_conns on del server
- BUG/MINOR: mux-quic: do not access conn after idle list insert
- MINOR: session: document explicitely that session_add_conn() is safe
- MINOR: session: uninline functions related to BE conns management
- MINOR: session: refactor alloc/lookup of sess_conns elements
- MEDIUM: session: protect sess conns list by idle_conns_lock
- MINOR: server: shard by thread sess_conns member
- MEDIUM: server: close new idle conns if server in maintenance
- MEDIUM: session: close new idle conns if server in maintenance
- MINOR: server: cleanup idle conns for server in maint already stopped
- MINOR: muxes: enforce thread-safety for private idle conns
- MEDIUM: conn/muxes/ssl: reinsert BE priv conn into sess on IO completion
- MEDIUM: conn/muxes/ssl: remove BE priv idle conn from sess on IO
- MEDIUM: mux-quic: enforce thread-safety of backend idle conns
- MAJOR: server: implement purging of private idle connections
- MEDIUM: session: account on server idle conns attached to session
- MAJOR: server: do not remove idle conns in del server
- BUILD: mworker: fix ignoring return value of read
- DOC: unreliable sockpair@ on macOS
- MINOR: muxes: adjust takeover with buf_wait interaction
- OPTIM: backend: set release on takeover for strict maxconn
- DOC: configuration: confuse "strict-mode" with "zero-warning"
- MINOR: doc: add missing statistics column
- MINOR: doc: add missing statistics column
- MINOR: stats: display new curr_sess_idle_conns server counter
- MINOR: proxy: extend "show servers conn" output
- MEDIUM: proxy: Reject some header names for 'http-send-name-header' directive
- BUG/BUILD: stats: fix build due to missing stat enum definition
- DOC: proxy-protocol: Make example for PP2_SUBTYPE_SSL_SIG_ALG accurate
- CLEANUP: quic: remove a useless CRYPTO frame variable assignment
- BUG/MEDIUM: quic: CRYPTO frame freeing without eb_delete()
- BUG/MAJOR: mux-quic: fix crash on reload during emission
- MINOR: conn/muxes/ssl: add ASSUME_NONNULL() prior to _srv_add_idle
- REG-TESTS: map_redirect: Don't use hdr_dom in ACLs with "-m end" matching method
- MINOR: acl: Only allow one '-m' matching method
- MINOR: acl; Warn when matching method based on a suffix is overwritten
- BUG/MEDIUM: server: Duplicate healthcheck's alpn inherited from default server
- BUG/MINOR: server: Duplicate healthcheck's sni inherited from default server
- BUG/MINOR: acl: Properly detect overwritten matching method
- BUG/MINOR: halog: Add OOM checks for calloc() in filter_count_srv_status() and filter_count_url()
- BUG/MINOR: log: Add OOM checks for calloc() and malloc() in logformat parser and dup_logger()
- BUG/MINOR: acl: Add OOM check for calloc() in smp_fetch_acl_parse()
- BUG/MINOR: cfgparse: Add OOM check for calloc() in cfg_parse_listen()
- BUG/MINOR: compression: Add OOM check for calloc() in parse_compression_options()
- BUG/MINOR: tools: Add OOM check for malloc() in indent_msg()
- BUG/MINOR: quic: ignore AGAIN ncbuf err when parsing CRYPTO frames
- MINOR: quic/flags: complete missing flags
- BUG/MINOR: quic: fix room check if padding requested
- BUG/MINOR: quic: fix padding issue on INITIAL retransmit
- BUG/MINOR: quic: pad Initial pkt with CONNECTION_CLOSE on client
- MEDIUM: quic: strengthen BUG_ON() for unpad Initial packet on client
- DOC: configuration: rework the jwt_verify keyword documentation
- BUG/MINOR: haproxy: be sure not to quit too early on soft stop
- BUILD: acl: silence a possible null deref warning in parse_acl_expr()
- MINOR: quic: Add more information about RX packets
- CI: fix syntax of Quic Interop pipelines
- MEDIUM: cfgparse: warn when using user/group when built statically
- BUG/MEDIUM: stick-tables: don't leave the expire loop with elements deleted
- BUG/MINOR: stick-tables: never leave used entries without expiration
- BUG/MEDIUM: peers: don't fail twice to grab the update lock
- MINOR: stick-tables: limit the number of visited nodes during expiration
- OPTIM: stick-tables: exit expiry faster when the update lock is held
- MINOR: counters: retrieve detailed errmsg upon failure with counters_{fe,be}_shared_prepare()
- MINOR: stats-file: introduce shm-stats-file directive
- MEDIUM: stats-file: processes share the same clock source from shm-stats-file
- MINOR: stats-file: add process slot management for shm stats file
- MEDIUM: stats-file/counters: store and preload stats counters as shm file objects
- DOC: config: document "shm-stats-file" directive
- OPTIM: stats-file: don't unnecessarily die hard on shm_stats_file_reuse_object()
- MINOR: compiler: add ALWAYS_PAD() macro
- BUILD: stats-file: fix aligment issues
- MINOR: stats-file: reserve some bytes in exported structs
- MEDIUM: stats-file: add some BUG_ON() guards to ensure exported structs are not changed by accident
- BUG/MINOR: check: ensure check-reuse is compatible with SSL
- BUG/MINOR: check: fix dst address when reusing a connection
- REGTESTS: explicitly use "balance roundrobin" where RR is needed
- MAJOR: backend: switch the default balancing algo to "random"
- BUG/MEDIUM: conn: fix UAF on connection after reversal on edge
- BUG/MINOR: connection: streamline conn detach from lists
- BUG/MEDIUM: quic-be: too early SSL_SESSION initialization
- BUG/MINOR: log: fix potential memory leak upon error in add_to_logformat_list()
- MEDIUM: init: always warn when running as root without being asked to
- MINOR: sample: Add base2 converter
- MINOR: version: add -vq, -vqb, and -vqs flags for concise version output
- BUILD: trace: silence a bogus build warning at -Og
- MINOR: trace: accept trace spec right after "-dt" on the command line
- BUILD: makefile: bump the default minimum linux version to 4.17
2025/08/20 : 3.3-dev7
- MINOR: quic: duplicate GSO unsupp status from listener to conn
- MINOR: quic: define QUIC_FL_CONN_IS_BACK flag
- MINOR: quic: prefer qc_is_back() usage over qc->target
- BUG/MINOR: cfgparse: immediately stop after hard error in srv_init()
- BUG/MINOR: cfgparse-listen: update err_code for fatal error on proxy directive
- BUG/MINOR: proxy: avoid NULL-deref in post_section_px_cleanup()
- MINOR: guid: add guid_get() helper
- MINOR: guid: add guid_count() function
- MINOR: clock: add clock_set_now_offset() helper
- MINOR: clock: add clock_get_now_offset() helper
- MINOR: init: add REGISTER_POST_DEINIT_MASTER() hook
- BUILD: restore USE_SHM_OPEN build option
- BUG/MINOR: stick-table: cap sticky counter idx with tune.nb_stk_ctr instead of MAX_SESS_STKCTR
- MINOR: sock: update broken accept4 detection for older hardwares.
- CI: vtest: add os name to OT cache key
- CI: vtest: add Ubuntu arm64 builds
- BUG/MEDIUM: ssl: Fix 0rtt to the server
- BUG/MEDIUM: ssl: fix build with AWS-LC
- MEDIUM: acme: use lowercase for challenge names in configuration
- BUG/MINOR: init: Initialize random seed earlier in the init process
- DOC: management: clarify usage of -V with -c
- MEDIUM: ssl/cli: relax crt insertion in crt-list of type directory
- MINOR: tools: implement ha_aligned_zalloc()
- CLEANUP: fd: make use of ha_aligned_alloc() for the fdtab
- MINOR: pools: distinguish the requested alignment from the type-specific one
- MINOR: pools: permit to optionally specify extra size and alignment
- MINOR: pools: always check that requested alignment matches the type's
- DOC: api: update the pools API with the alignment and typed declarations
- MEDIUM: tree-wide: replace most DECLARE_POOL with DECLARE_TYPED_POOL
- OPTIM: tasks: align task and tasklet pools to 64
- OPTIM: buffers: align the buffer pool to 64
- OPTIM: queue: align the pendconn pools to 64
- OPTIM: connection: align connection pools to 64
- OPTIM: server: start to use aligned allocs in server
- DOC: management: fix typo in commit f4f93c56
- DOC: config: recommend single quoting passwords
- MINOR: tools: also implement ha_aligned_alloc_typed()
- MEDIUM: server: introduce srv_alloc()/srv_free() to alloc/free a server
- MINOR: server: align server struct to 64 bytes
- MEDIUM: ring: always allocate properly aligned ring structures
- CI: Update to actions/checkout@v5
- MINOR: quic: implement qc_ssl_do_hanshake()
- BUG/MEDIUM: quic: listener connection stuck during handshakes (OpenSSL 3.5)
- BUG/MINOR: mux-h1: fix wrong lock label
- MEDIUM: dns: don't call connect to dest socket for AF_INET*
- BUG/MINOR: spoe: Properly detect and skip empty NOTIFY frames
- BUG/MEDIUM: cli: Report inbuf is no longer full when a line is consumed
- BUG/MEDIUM: quic: crash after quic_conn allocation failures
- BUG/MEDIUM: quic-be: do not initialize ->conn too early
- BUG/MEDIUM: mworker: more verbose error upon loading failure
- MINOR: xprt: Add recvmsg() and sendmsg() parameters to rcv_buf() and snd_buf().
- MINOR: ssl: Add a "flags" field to ssl_sock_ctx.
- MEDIUM: xprt: Add a "get_capability" method.
- MEDIUM: mux_h1/mux_pt: Use XPRT_CAN_SPLICE to decide if we should splice
- MINOR: cfgparse: Add a new "ktls" option to bind and server.
- MINOR: ssl: Define HAVE_VANILLA_OPENSSL if openssl is used.
- MINOR: build: Add a new option, USE_KTLS.
- MEDIUM: ssl: Add kTLS support for OpenSSL.
- MEDIUM: splice: Don't consider EINVAL to be a fatal error
- MEDIUM: ssl: Add splicing with SSL.
- MEDIUM: ssl: Add ktls support for AWS-LC.
- MEDIUM: ssl: Add support for ktls on TLS 1.3 with AWS-LC
- MEDIUM: ssl: Handle non-Application data record with AWS-LC
- MINOR: ssl: Add a way to globally disable ktls.
2025/08/06 : 3.3-dev6
- MINOR: acme: implement traces
- BUG/MINOR: hlua: take default-path into account with lua-load-per-thread

22
INSTALL
View File

@ -111,7 +111,7 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
may want to retry with "gmake" which is the name commonly used for GNU make
on BSD systems.
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
- GCC >= 4.7 (up to 14 tested). Older versions are no longer supported due to
the latest mt_list update which only uses c11-like atomics. Newer versions
may sometimes break due to compiler regressions or behaviour changes. The
version shipped with your operating system is very likely to work with no
@ -237,7 +237,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
-----------------
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
supports the OpenSSL library, and is known to build and work with branches
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.5. It is recommended to use
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
and each of the branches above receives its own fixes, without forcing you to
@ -259,15 +259,11 @@ reported to work as well. While there are some efforts from the community to
ensure they work well, OpenSSL remains the primary target and this means that
in case of conflicting choices, OpenSSL support will be favored over other
options. Note that QUIC is not fully supported when haproxy is built with
OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
alternatives. As of writing this, the QuicTLS project follows OpenSSL very
closely and provides update simultaneously, but being a volunteer-driven
project, its long-term future does not look certain enough to convince
operating systems to package it, so it needs to be build locally. Recent
versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
generally more performant than other OpenSSL derivatives, but may behave
slightly differently, particularly when dealing with outdated setups. See
the section about QUIC in this document.
OpenSSL < 3.5 version. In this case, QUICTLS is the preferred alternative.
As of writing this, the QuicTLS project follows OpenSSL very closely and provides
update simultaneously, but being a volunteer-driven project, its long-term future
does not look certain enough to convince operating systems to package it, so it
needs to be build locally. See the section about QUIC in this document.
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
supported alternative stack not based on OpenSSL, yet which implements almost
@ -563,9 +559,9 @@ It goes into more details with the main options.
To build haproxy, you have to choose your target OS amongst the following ones
and assign it to the TARGET variable :
- linux-glibc for Linux kernel 4.17 and above
- linux-glibc for Linux kernel 2.6.28 and above
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
- linux-musl for Linux kernel 4.17 and above with musl libc
- linux-musl for Linux kernel 2.6.28 and above with musl libc
- solaris for Solaris 10 and above
- freebsd for FreeBSD 10 and above
- dragonfly for DragonFlyBSD 4.3 and above

View File

@ -35,7 +35,6 @@
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
# USE_OPENSSL_AWSLC : enable use of AWS-LC
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
# USE_ECH : enable use of ECH with the OpenSSL API
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
# USE_ENGINE : enable use of OpenSSL Engine.
@ -63,8 +62,6 @@
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
# USE_SHM_OPEN : use shm_open() for features that can make use of shared memory
# USE_KTLS : use kTLS.(requires at least Linux 4.17).
#
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
# "USE_xxx=" (empty string). The list of enabled and disabled options for a
@ -214,8 +211,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
# undefined behavior to silently produce invalid code. For this reason we have
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
# It is preferable not to change this option in order to avoid breakage.
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \
$(call cc-opt,-fvect-cost-model=very-cheap)
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)
#### Compiler-specific flags to enable certain classes of warnings.
# Some are hard-coded, others are enabled only if supported.
@ -342,16 +338,14 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
USE_ECH \
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
USE_MEMORY_PROFILING USE_SHM_OPEN \
USE_MEMORY_PROFILING \
USE_STATIC_PCRE USE_STATIC_PCRE2 \
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT \
USE_QUIC_OPENSSL_COMPAT USE_KTLS
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
# preset all variables for all supported build options among use_opts
$(reset_opts_vars)
@ -382,13 +376,13 @@ ifeq ($(TARGET),haiku)
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
endif
# For linux >= 4.17 and glibc
# For linux >= 2.6.28 and glibc
ifeq ($(TARGET),linux-glibc)
set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
USE_GETADDRINFO USE_BACKTRACE)
INSTALL = install -v
endif
@ -401,13 +395,13 @@ ifeq ($(TARGET),linux-glibc-legacy)
INSTALL = install -v
endif
# For linux >= 4.17 and musl
# For linux >= 2.6.28 and musl
ifeq ($(TARGET),linux-musl)
set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
USE_GETADDRINFO USE_BACKTRACE)
INSTALL = install -v
endif
@ -601,10 +595,6 @@ ifneq ($(USE_BACKTRACE:0=),)
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
endif
ifneq ($(USE_MEMORY_PROFILING:0=),)
MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
endif
ifneq ($(USE_CPU_AFFINITY:0=),)
OPTIONS_OBJS += src/cpuset.o
OPTIONS_OBJS += src/cpu_topo.o
@ -643,7 +633,7 @@ ifneq ($(USE_OPENSSL:0=),)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
src/ssl_trace.o src/jwe.o
src/ssl_trace.o
endif
ifneq ($(USE_ENGINE:0=),)
@ -969,15 +959,15 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cache.o src/stconn.o src/http_htx.o src/debug.o \
src/check.o src/stats-html.o src/haproxy.o src/listener.o \
src/applet.o src/pattern.o src/cfgparse-listen.o \
src/flt_spoe.o src/cebis_tree.o src/http_ext.o \
src/http_act.o src/http_fetch.o src/cebs_tree.o \
src/cebib_tree.o src/http_client.o src/dns.o \
src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
src/flt_spoe.o src/cebuis_tree.o src/http_ext.o \
src/http_act.o src/http_fetch.o src/cebus_tree.o \
src/cebuib_tree.o src/http_client.o src/dns.o \
src/cebub_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
src/cfgparse-global.o src/filters.o src/mux_pt.o \
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
src/ceba_tree.o src/session.o src/payload.o src/htx.o \
src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \
src/cebua_tree.o src/session.o src/payload.o src/htx.o \
src/cebul_tree.o src/cebu32_tree.o src/cebu64_tree.o \
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
@ -992,7 +982,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \
src/lb_map.o src/shctx.o src/hpack-dec.o \
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
@ -1002,7 +992,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o
src/httpclient_cli.o src/version.o
ifneq ($(TRACE),)
OBJS += src/calltrace.o
@ -1123,11 +1113,6 @@ install-doc:
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
done
install-admin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"
install-bin:
$(Q)for i in haproxy $(EXTRA); do \
if ! [ -e "$$i" ]; then \
@ -1138,7 +1123,7 @@ install-bin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
install: install-bin install-admin install-man install-doc
install: install-bin install-man install-doc
uninstall:
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
@ -1295,8 +1280,6 @@ unit-tests:
# options for all commits within RANGE. RANGE may be either a git range
# such as ref1..ref2 or a single commit, in which case all commits from
# the master branch to this one will be tested.
# Will execute TEST_CMD for each commit if defined, and will stop in case of
# failure.
range:
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
@ -1322,7 +1305,6 @@ range:
echo "[ $$index/$$count ] $$commit #############################"; \
git checkout -q $$commit || die 1; \
$(MAKE) all || die 1; \
[ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
index=$$((index + 1)); \
done; \
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \

View File

@ -1,2 +1,2 @@
$Format:%ci$
2026/01/07
2025/08/06

View File

@ -1 +1 @@
3.4-dev2
3.3-dev6

View File

@ -242,8 +242,8 @@ void promex_register_module(struct promex_module *m)
}
/* Pools used to allocate ref on Promex modules and filters */
DECLARE_STATIC_TYPED_POOL(pool_head_promex_mod_ref, "promex_module_ref", struct promex_module_ref);
DECLARE_STATIC_TYPED_POOL(pool_head_promex_metric_flt, "promex_metric_filter", struct promex_metric_filter);
DECLARE_STATIC_POOL(pool_head_promex_mod_ref, "promex_module_ref", sizeof(struct promex_module_ref));
DECLARE_STATIC_POOL(pool_head_promex_metric_flt, "promex_metric_filter", sizeof(struct promex_metric_filter));
/* Return the server status. */
enum promex_srv_state promex_srv_status(struct server *sv)
@ -2144,7 +2144,7 @@ static void promex_appctx_handle_io(struct appctx *appctx)
struct applet promex_applet = {
.obj_type = OBJ_TYPE_APPLET,
.flags = APPLET_FL_NEW_API|APPLET_FL_HTX,
.flags = APPLET_FL_NEW_API,
.name = "<PROMEX>", /* used for logging */
.init = promex_appctx_init,
.release = promex_appctx_release,

View File

@ -1,235 +0,0 @@
#!/bin/bash
#
# Dump certificates from the HAProxy stats or master socket to the filesystem
# Experimental script
#
set -e
export BASEPATH=${BASEPATH:-/etc/haproxy}/
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
export DRY_RUN=0
export DEBUG=
export VERBOSE=
export M="@1 "
export TMP
# Print the arguments only when verbose mode is enabled; always succeeds
# so it can be used in "&&"-chained contexts under "set -e".
vecho() {
	if [ -n "$VERBOSE" ]; then
		echo "$@"
	fi
	return 0
}
# Query "show ssl cert <name>" on the HAProxy socket and extract the
# certificate and key storage paths. On success, exports NAME,
# CRT_FILENAME and KEY_FILENAME (made absolute with BASEPATH when
# relative) and returns 0; returns 1 when the socket answer carries no
# filename details.
read_certificate() {
name=$1
crt_filename=
key_filename=
OFS=$IFS
IFS=":"
# parse "Key: Value" lines from the socket answer
while read -r key value; do
case "$key" in
"Crt filename")
crt_filename="${value# }"
# a combined pem stores the key in the same file; the
# "Key filename" field below overrides this when separate
key_filename="${value# }"
;;
"Key filename")
key_filename="${value# }"
;;
esac
done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
IFS=$OFS
if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
return 1
fi
# handle fields without a crt-base/key-base
[ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
[ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"
vecho "name:$name"
vecho "crt:$crt_filename"
vecho "key:$key_filename"
export NAME="$name"
export CRT_FILENAME="$crt_filename"
export KEY_FILENAME="$key_filename"
return 0
}
# Compare the SHA-256 fingerprints of two certificate files.
# Returns 0 when the first file exists and both fingerprints match,
# 1 otherwise (missing previous file counts as "different").
cmp_certkey() {
	old_crt=$1
	new_crt=$2

	if [ ! -f "$old_crt" ]; then
		return 1
	fi

	cmp -s <(openssl x509 -in "$old_crt" -noout -fingerprint -sha256) \
	       <(openssl x509 -in "$new_crt" -noout -fingerprint -sha256) || return 1

	return 0
}
# Dump one certificate from the socket into the files it was loaded from.
# $1: certificate name, $2: current crt path, $3: current key path.
# The new key and cert are first written to temporary files in $TMP and
# validated (the key must match the cert's public key); the previous
# files are preserved as "<file>.old.<timestamp>". Honors DRY_RUN.
# Returns 1 on tmp-file creation or key/cert mismatch failure.
dump_certificate() {
name=$1
prev_crt=$2
prev_key=$3
r="tmp.${RANDOM}"
d="old.$(date +%s)"
new_crt="$TMP/$(basename "$prev_crt").${r}"
new_key="$TMP/$(basename "$prev_key").${r}"
if ! touch "${new_crt}" || ! touch "${new_key}"; then
echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
return 1
fi
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
# use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"
# refuse to install a key that does not match the dumped certificate
if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
return 1
fi
# nothing to do when the on-disk certificate already matches the dump
if cmp_certkey "${prev_crt}" "${new_crt}"; then
# NOTE(review): message uses the global $crt_filename set by
# read_certificate — presumably the same path as $prev_crt; verify
echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
return 0
fi
# dry run will just return before trying to move the files
if [ "${DRY_RUN}" != "0" ]; then
return 0
fi
# move the current certificates to ".old.timestamp"
if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
mv "${prev_crt}" "${prev_crt}.${d}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
fi
# move the new certificates to old place
mv "${new_crt}" "${prev_crt}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"
return 0
}
# Iterate over every entry listed by "show ssl cert" (skipping comment
# and empty lines) and dump each certificate to the filesystem.
dump_all_certificates() {
echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
export NAME
export CRT_FILENAME
export KEY_FILENAME
if read_certificate "$line"; then
dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
else
# NOTE(review): "$name" is the global set by read_certificate —
# same value as "$line" here, but "$line" would be clearer
echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
fi
done
}
# Print the command-line help on stdout.
# Fixes user-facing typos: "Specifiy" -> "Specify", "Require" -> "Requires".
usage() {
	echo "Usage:"
	echo " $0 [options]* [cert]*"
	echo ""
	echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
	echo " Requires socat and openssl"
	echo " EXPERIMENTAL script, backup your files!"
	echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
	echo ""
	echo "Options:"
	echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
	echo " -s, --socket <path> Use the stats socket at <path>"
	echo " -p, --path <path> Specify a base path for relative files (default: ${BASEPATH})"
	echo " -n, --dry-run Read certificates on the socket but don't dump them"
	echo " -d, --debug Debug mode, set -x"
	echo " -v, --verbose Verbose mode"
	echo " -h, --help This help"
	echo " -- End of options"
	echo ""
	echo "Examples:"
	echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
	echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
	echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
}
# Parse command-line options, then dump either every certificate (no
# positional argument) or only the certificate names given after the
# options / after "--".
main() {
while [ -n "$1" ]; do
case "$1" in
-S|--master-socket)
SOCKET="$2"
# master socket commands are prefixed with "@1 " to reach the worker
M="@1 "
shift 2
;;
-s|--socket)
SOCKET="$2"
# no prefix needed on a plain stats socket
M=
shift 2
;;
-p|--path)
BASEPATH="$2/"
shift 2
;;
-n|--dry-run)
DRY_RUN=1
shift
;;
-d|--debug)
DEBUG=1
shift
;;
-v|--verbose)
VERBOSE=1
shift
;;
-h|--help)
usage "$@"
exit 0
;;
--)
shift
break
;;
-*)
echo "[ALERT] ($$) : Unknown option '$1'" >&2
usage "$@"
exit 1
;;
*)
break
;;
esac
done
if [ -n "$DEBUG" ]; then
set -x
fi
TMP=${TMP:-$(mktemp -d)}
if [ -z "$1" ]; then
dump_all_certificates
else
# compute the certificates names at the end of the command
while [ -n "$1" ]; do
if ! read_certificate "$1"; then
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
exit 1
fi
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
shift
done
fi
}
# remove the temporary dump directory on exit, whatever the outcome
trap 'rm -rf -- "$TMP"' EXIT
main "$@"

View File

@ -1,113 +0,0 @@
#!/bin/bash
# Trigger a reload of HAProxy through the master CLI socket and report
# whether it succeeded, using the "Success=" marker of the reload output.
set -e
# Verbosity: 0 = silent, 1 = alerts only, 2 = haproxy output on failure,
# 3 = haproxy output on success and failure.
export VERBOSE=1
# socat read timeout in seconds (passed via socat -t)
export TIMEOUT=90
# path of the haproxy master CLI socket (can be preset in the environment)
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
# reload result: empty = unknown/timeout, 0 = success, 1 = failure
export RET=
# Print a message on stderr, unless the script runs in silent mode
# (VERBOSE below 1, i.e. the -s option was used).
alert() {
	if [ "$VERBOSE" -lt 1 ]; then
		return 0
	fi
	echo "[ALERT] $*" >&2
}
# Send the "reload" command to the master socket and parse its output.
# Sets RET to 0 on "Success=1", 1 on "Success=0"; RET stays empty if no
# Success marker was seen before socat's timeout expired.
# Returns RET (0/1), or 1 after alerting when the reload timed out.
reload() {
	while read -r line; do
		if [ "$line" = "Success=0" ]; then
			RET=1
		elif [ "$line" = "Success=1" ]; then
			RET=0
		elif [ "$line" = "Another reload is still in progress." ]; then
			alert "$line"
		elif [ "$line" = "--" ]; then
			# separator between the status header and haproxy's output
			continue;
		else
			# any other line is haproxy's own startup output; show it
			# depending on verbosity. NOTE(review): this assumes the
			# "Success=" line precedes the output lines, since RET is
			# used here to decide whether the reload failed — confirm
			# against the master CLI output format.
			if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
				echo "$line" >&2
			elif [ "$VERBOSE" = "3" ]; then
				echo "$line" >&2
			fi
		fi
	# process substitution (not a pipe) so the loop runs in the current
	# shell and its assignment of RET remains visible below
	done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
	if [ -z "$RET" ]; then
		alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
		return 1
	fi
	return "$RET"
}
# Print the command usage / help text on stdout.
# Fix: the example used "-d ${TIMEOUT}", but -d is the no-argument debug
# flag; the timeout is set with -t, as documented a few lines above.
usage() {
	echo "Usage:"
	echo " $0 [options]*"
	echo ""
	echo " Trigger a reload from the master socket"
	echo " Require socat"
	echo " EXPERIMENTAL script!"
	echo ""
	echo "Options:"
	echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
	echo " -d, --debug Debug mode, set -x"
	echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
	echo " -s, --silent Silent mode (no output)"
	echo " -v, --verbose Verbose output (output from haproxy on failure)"
	echo " -vv Even more verbose output (output from haproxy on success and failure)"
	echo " -h, --help This help"
	echo ""
	echo "Examples:"
	echo " $0 -S ${MASTER_SOCKET} -t ${TIMEOUT}"
}
# Parse the command line and set the corresponding globals
# (MASTER_SOCKET, TIMEOUT, VERBOSE, DEBUG). Exits on -h/--help or on any
# unknown argument; the actual reload is performed by the caller.
# Fix: the most-verbose case was written "-vv|--verbose)", but
# "--verbose" is already consumed by the "-v|--verbose)" arm above, so
# that alternative was dead; only the short "-vv" form can reach it.
main() {
	while [ -n "$1" ]; do
		case "$1" in
		-S|--master-socket)
			MASTER_SOCKET="$2"
			shift 2
			;;
		-t|--timeout)
			TIMEOUT="$2"
			shift 2
			;;
		-s|--silent)
			VERBOSE=0
			shift
			;;
		-v|--verbose)
			VERBOSE=2
			shift
			;;
		-vv)
			VERBOSE=3
			shift
			;;
		-d|--debug)
			DEBUG=1
			shift
			;;
		-h|--help)
			usage "$@"
			exit 0
			;;
		*)
			# this script takes no positional arguments
			echo "[ALERT] ($$) : Unknown option '$1'" >&2
			usage "$@"
			exit 1
			;;
		esac
	done
	if [ -n "$DEBUG" ]; then
		set -x
	fi
}
# Parse options first, then perform the reload; under "set -e" the
# script's exit status is that of reload() (non-zero on failed reload).
main "$@"
reload

View File

@ -1571,10 +1571,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
if (!srv_node) {
/* server not yet in the tree, let's create it */
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
if (unlikely(!srv)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
srv_node = &srv->node;
memcpy(&srv_node->key, b, e - b);
srv_node->key[e - b] = '\0';
@ -1684,10 +1680,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
*/
if (unlikely(!ustat))
ustat = calloc(1, sizeof(*ustat));
if (unlikely(!ustat)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
ustat->nb_err = err;
ustat->nb_req = 1;

View File

@ -6,9 +6,9 @@ Wants=network-online.target
[Service]
EnvironmentFile=-/etc/default/haproxy
EnvironmentFile=-/etc/sysconfig/haproxy
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed
Restart=always

View File

@ -59,9 +59,9 @@ struct ring_v2 {
struct ring_v2a {
size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps)
size_t tail ALIGNED(64); // storage tail
size_t head ALIGNED(64); // storage head
char area[0] ALIGNED(64); // storage area begins immediately here
size_t tail __attribute__((aligned(64))); // storage tail
size_t head __attribute__((aligned(64))); // storage head
char area[0] __attribute__((aligned(64))); // storage area begins immediately here
};
/* display the message and exit with the code */

View File

@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.4-dev, and maintenance branches are 3.3 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.

View File

@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.3 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:

View File

@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
(cd "$HAPROXY_DIR"
# avoid git pull, it chokes on forced push
git remote update origin; git reset origin/master;git checkout -f
git pull
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
if [ -n "$last_file" ]; then
restart=$(head -n1 "$last_file" | cut -f2 -d' ')

File diff suppressed because it is too large Load Diff

View File

@ -1,86 +0,0 @@
2025-08-13 - Memory allocation in HAProxy 3.3
The vast majority of dynamic memory allocations are performed from pools. Pools
are optimized to store pre-calibrated objects of the right size for a given
usage, try to favor locality and hot objects as much as possible, and are
heavily instrumented to detect and help debug a wide class of bugs including
buffer overflows, use-after-free, etc.
For objects of random sizes, or those used only at configuration time, pools
are not suited, and the regular malloc/free family is available, in addition of
a few others.
The standard allocation calls are intercepted at the code level (#define) when
the code is compiled with -DDEBUG_MEM_STATS. For this reason, these calls are
redefined as macros in "bug.h", and one must not try to use the pointers to
such functions, as this may break DEBUG_MEM_STATS. This provides fine-grained
stats about allocation/free per line of source code using locally implemented
counters that can be consulted by "debug dev memstats". The calls are
categorized into one of "calloc", "free", "malloc", "realloc", "strdup",
"p_alloc", "p_free", the latter two designating pools. Extra calls such as
memalign() and similar are also intercepted and counted as malloc.
Due to the nature of this replacement, DEBUG_MEM_STATS cannot see operations
performed in libraries or dependencies.
In addition to DEBUG_MEM_STATS, when haproxy is built with USE_MEMORY_PROFILING
the standard functions are wrapped by new ones defined in "activity.c", which
also hold counters by call place. These ones are able to trace activity in
libraries because the functions check the return pointer to figure where the
call was made. The approach is different and relies on a large hash table. The
files, function names and line numbers are not know, but by passing the pointer
to dladdr(), we can often resolve most of these symbols. These operations are
consulted via "show profiling memory". It must first be enabled either in the
global config "profiling.memory on" or the CLI using "set profiling memory on".
Memory profiling can also track pool allocations and frees thanks to knowing
the size of the element and knowing a place where to store it. Some future
evolutions might consider making this possible as well for pure malloc/free
too by leveraging malloc_usable_size() a bit more.
Finally, 3.3 brought aligned allocations. These are made available via a new
family of functions around ha_aligned_alloc() that simply map to either
posix_memalign(), memalign() or _aligned_malloc() for CYGWIN, depending on
which one is available. This latter one requires to pass the pointer to
_aligned_free() instead of free(), so for this reason, all aligned allocations
have to be released using ha_aligned_free(). Since this mostly happens on
configuration elements, in practice it's not as inconvenient as it can sound.
These functions are in reality macros handled in "bug.h" like the previous
ones in order to deal with DEBUG_MEM_STATS. All "alloc" variants are reported
in memstats as "malloc". All "zalloc" variants are reported in memstats as
"calloc".
The currently available allocators are the following:
- void *ha_aligned_alloc(size_t align, size_t size)
- void *ha_aligned_zalloc(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment MUST be
at least as large as one word and MUST be a power of two. The "zalloc"
variant also zeroes the area on success. Both return NULL on failure.
- void *ha_aligned_alloc_safe(size_t align, size_t size)
- void *ha_aligned_zalloc_safe(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment is
automatically adjusted to the nearest larger power of two that is at least
as large as a word. The "zalloc" variant also zeroes the area on
success. Both return NULL on failure.
- (type *)ha_aligned_alloc_typed(size_t count, type)
(type *)ha_aligned_zalloc_typed(size_t count, type)
This macro returns an area aligned to the required alignment for type
<type>, large enough for <count> objects of this type, and the result is a
pointer of this type. The goal is to ease allocation of known structures
whose alignment is not necessarily known to the developer (and to avoid
encouraging to hard-code alignment). The cast in return also provides a
last-minute control in case a wrong type is mistakenly used due to a poor
copy-paste or an extra "*" after the type. When DEBUG_MEM_STATS is in use,
the type is stored as a string in the ".extra" field so that it can be
displayed in "debug dev memstats". The "zalloc" variant also zeroes the
area on success. Both return NULL on failure.
- void ha_aligned_free(void *ptr)
Frees the area pointed to by ptr. It is the equivalent of free() but for
objects allocated using one of the functions above.

View File

@ -245,30 +245,6 @@ mt_list_pop(l)
#=========#
mt_list_pop_locked(l)
Removes the list's first element, returns it locked. If the list was empty,
NULL is returned. A macro MT_LIST_POP_LOCKED() is provided for a
more convenient use; instead of returning the list element, it will return
the structure holding the element, taking care of preserving the NULL.
before:
+---+ +---+ +---+ +---+ +---+ +---+ +---+
#=>| L |<===>| A |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
# +---+ +---+ +---+ +---+ +---+ +---+ +---+ #
#=====================================================================#
after:
+---+ +---+ +---+ +---+ +---+ +---+
#=>| L |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
# +---+ +---+ +---+ +---+ +---+ +---+ #
#===========================================================#
+---+
# x| A |x #
# +---+ #
#=========#
_mt_list_lock_next(elt)
Locks the link that starts at the next pointer of the designated element.
The link is replaced by two locked pointers, and a pointer to the next

View File

@ -1,4 +1,4 @@
2025-08-11 - Pools structure and API
2022-02-24 - Pools structure and API
1. Background
-------------
@ -239,6 +239,10 @@ currently in use:
+------------+ +------------+ / is set at build time
or -dMtag at boot time
Right now no provisions are made to return objects aligned on larger boundaries
than those currently covered by malloc() (i.e. two pointers). This need appears
from time to time and the layout above might evolve a little bit if needed.
4. Storage in the process-wide shared pool
------------------------------------------
@ -353,22 +357,6 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
returned pointer is the new (or reused) pool head, or NULL upon error.
Pools created this way must be destroyed using pool_destroy().
struct pool_head *create_aligned_pool(char *name, uint size, uint align, uint flags)
Create a new pool named <name> for objects of size <size> bytes and
aligned to <align> bytes (0 meaning use the platform's default). Pool
names are truncated to their first 11 characters. Pools of very similar
size will usually be merged if both have set the flag MEM_F_SHARED in
<flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
"-dMno-merge" is passed on the executable's command line, the pools
also need to have the exact same name to be merged. In addition, unless
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
per-pool basis to enable the UAF detection only for this specific pool,
saving the massive overhead of global usage. The name that will appear
in the pool upon merging is the name of the first created pool. The
returned pointer is the new (or reused) pool head, or NULL upon error.
Pools created this way must be destroyed using pool_destroy().
void *pool_destroy(struct pool_head *pool)
Destroy pool <pool>, that is, all of its unused objects are freed and
the structure is freed as well if the pool didn't have any used objects
@ -482,20 +470,6 @@ complicate maintenance.
A few macros exist to ease the declaration of pools:
DECLARE_ALIGNED_POOL(ptr, name, size, align)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element, all
of which will be aligned to <align> bytes. The alignment will be
rounded up to the next power of two and will be at least as large as a
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
and by assigning the resulting pointer to variable <ptr>. <ptr> will be
created of type "struct pool_head *". If the pool needs to be visible
outside of the function (which is likely), it will also need to be
declared somewhere as "extern struct pool_head *<ptr>;". It is
recommended to place such declarations very early in the source file so
that the variable is already known to all subsequent functions which
may use it.
DECLARE_POOL(ptr, name, size)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element. This
@ -507,17 +481,6 @@ DECLARE_POOL(ptr, name, size)
declarations very early in the source file so that the variable is
already known to all subsequent functions which may use it.
DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element, all
of which will be aligned to <align> bytes. The alignment will be
rounded up to the next power of two and will be at least as large as a
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
and by assigning the resulting pointer to local variable <ptr>. <ptr>
will be created of type "static struct pool_head *". It is recommended
to place such declarations very early in the source file so that the
variable is already known to all subsequent functions which may use it.
DECLARE_STATIC_POOL(ptr, name, size)
Placed at the top level of a file, this declares a static memory pool
as variable <ptr>, name <name> and size <size> bytes per element. This
@ -527,42 +490,6 @@ DECLARE_STATIC_POOL(ptr, name, size)
early in the source file so that the variable is already known to all
subsequent functions which may use it.
DECLARE_STATIC_TYPED_POOL(ptr, name, type[, extra[, align]])
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name>, and configured to allocate objects of
type <type>. It is optionally possible to grow these objects by <extra>
bytes (e.g. if they contain some variable length data at the end), and
to force them to be aligned to <align> bytes. If only alignment is
desired without extra data, pass 0 as <extra>. Alignment must be at
least as large as the type's, and a control is enforced at declaration
time so that objects cannot be less aligned than what is promised to
the compiler. The default alignment of zero indicates that the default
one (from the type) should be used. This is made via a call to
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to local
variable <ptr>. <ptr> will be created of type "static struct pool_head
*". It is recommended to place such declarations very early in the
source file so that the variable is already known to all subsequent
functions which may use it.
DECLARE_TYPED_POOL(ptr, name, type[, extra[, align]])
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name>, and configured to allocate objects of
type <type>. It is optionally possible to grow these objects by <extra>
bytes (e.g. if they contain some variable length data at the end), and
to force them to be aligned to <align> bytes. If only alignment is
desired without extra data, pass 0 as <extra>. Alignment must be at
least as large as the type's, and a control is enforced at declaration
time so that objects cannot be less aligned than what is promised to
the compiler. The default alignment of zero indicates that the default
one (from the type) should be used. This is made via a call to
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to
variable <ptr>. <ptr> will be created of type "struct pool_head *". If
the pool needs to be visible outside of the function (which is likely),
it will also need to be declared somewhere as "extern struct pool_head
*<ptr>;". It is recommended to place such declarations very early in
the source file so that the variable is already known to all subsequent
functions which may use it.
6. Build options
----------------

View File

@ -1,53 +0,0 @@
2025/09/16 - SHM stats file storage description and hints
Shm stats file (used to share thread-groupable statistics over multiple
process through the "shm-stats-file" directive) is made of:
- a main header which describes the file version, the processes making
use of it, the common clock source and hints about the number of
objects that are currently stored or provisioned in the file.
- an indefinite number of "objects" blocks coming right after the
main header, all blocks have the same size which is the size of the
maximum underlying object that may be stored. The main header tells
how many objects are stored in the file.
File header looks like this (32/64 bits systems):
0 8 16 32 48 64
+-------+---------+----------------+-------------------+-------------------+
| VERSION | 2 bytes | global_now_ms (global mono date in ms)|
|MAJOR | MINOR | hole | |
+----------------------------------+---------------------------------------+
| global_now_ns (global mono date in ns) |
+--------------------------------------------------------------------------+
| now_offset (offset applied to global monotonic date |
| on startup) |
+--------------------------------------------------------------------------+
| Process slot : | 1byte x 64
| pid | heartbeat (ticks) |
+----------------------------------+---------------------------------------+
| objects | objects slots |
| (used objects) | (available for use) |
+----------------------------------+---------------------------------------+
| padding (for future use) | 128 bytes
+--------------------------------------------------------------------------+
Object block looks like this:
0 8 16 32 48 64
+-------+---------+----------------+-------------------+-------------------+
| GUID | 128 bytes
+ (zero terminated) +
| |
+-------+---------+--------------------------------------------------------+
| tgid | type | padding |
+-------+---------+--------------------------------------------------------+
| users (bitmask of process slots making use of the obj) |
+--------------------------------------------------------------------------+
| object data |
| (version dependent) |
| struct be_counters_shared_tg or |
| struct fe_counters_shared_tg |
+--------------------------------------------------------------------------+
| padding (to anticipate evolutions) | 64 bytes
+--------------------------------------------------------------------------+

View File

@ -1,7 +1,7 @@
-----------------------
HAProxy Starter Guide
-----------------------
version 3.4
version 3.3
This document is an introduction to HAProxy for all those who don't know it, as

View File

@ -1,7 +1,7 @@
------------------------
HAProxy Management Guide
------------------------
version 3.4
version 3.3
This document describes how to start, stop, manage, and troubleshoot HAProxy,
@ -200,12 +200,6 @@ list of options is :
-c : only performs a check of the configuration files and exits before trying
to bind. The exit status is zero if everything is OK, or non-zero if an
error is encountered. Presence of warnings will be reported if any.
By default this option does not report a success message. Combined with
"-V" this will print the message "Configuration file is valid" upon
success.
Scripts must use the exit status to determine the success of the
command.
-cc : evaluates a condition as used within a conditional block of the
configuration. The exit status is zero if the condition is true, 1 if the
@ -390,10 +384,6 @@ list of options is :
using strace to see the forwarded data (which do not appear when using
splice()).
-dT : disable the use of ktls. It is equivalent to the "global" section's
keyword "noktls". It is mostly useful when suspecting a bug related to
ktls.
-dV : disable SSL verify on the server side. It is equivalent to having
"ssl-server-verify none" in the "global" section. This is useful when
trying to reproduce production issues out of the production
@ -900,7 +890,9 @@ If a memory allocation fails due to the memory limit being reached or because
the system doesn't have any enough memory, then haproxy will first start to
free all available objects from all pools before attempting to allocate memory
again. This mechanism of releasing unused memory can be triggered by sending
the signal SIGQUIT to the haproxy process.
the signal SIGQUIT to the haproxy process. When doing so, the pools state prior
to the flush will also be reported to stderr when the process runs in
foreground.
During a reload operation, the process switched to the graceful stop state also
automatically performs some flushes after releasing any connection so that all
@ -1335,26 +1327,6 @@ Here is the list of static fields using the proxy statistics domain:
97. used_conn_cur [...S]: current number of connections in use
98. need_conn_est [...S]: estimated needed number of connections
99. uweight [..BS]: total user weight (backend), server user weight (server)
100. agg_server_status [..B.]: backend aggregated gauge of server's status
101. agg_server_status_check [..B.]: (deprecated)
102. agg_check_status [..B.]: backend aggregated gauge of server's state check
status
103. srid [...S]: server id revision
104. sess_other [.F..]: total number of sessions other than HTTP since process
started
105. h1_sess [.F..]: total number of HTTP/1 sessions since process started
106. h2_sess [.F..]: total number of HTTP/2 sessions since process started
107. h3_sess [.F..]: total number of HTTP/3 sessions since process started
108. req_other [.F..]: total number of sessions other than HTTP processed by
this object since the worker process started
109. h1req [.F..]: total number of HTTP/1 sessions processed by this object
since the worker process started
110. h2req [.F..]: total number of HTTP/2 sessions processed by this object
since the worker process started
111. h3req [.F..]: total number of HTTP/3 sessions processed by this object
since the worker process started
112. proto [L...]: protocol
113. priv_idle_cur [...S]: current number of private idle connections
For all other statistics domains, the presence or the order of the fields are
not guaranteed. In this case, the header line should always be used to parse
@ -1834,35 +1806,6 @@ add ssl crt-list <crtlist> <payload>
$ echo -e 'add ssl crt-list certlist1 <<\nfoobar.pem [allow-0rtt] foo.bar.com
!test1.com\n' | socat /tmp/sock1 -
add ssl ech <bind> <payload>
Add an ECH key to a <bind> line. The payload must be in the PEM for ECH format.
(https://datatracker.ietf.org/doc/html/draft-farrell-tls-pemesni)
The bind line format is <frontend>/@<filename>:<linenum> (Example:
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
with the "name" keyword.
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
compiled with USE_ECH=1. This command is only supported on a CLI connection
running in experimental mode (see "experimental-mode on").
See also "show ssl ech" and "ech" in the Section 5.1 of the configuration
manual.
Example:
$ openssl ech -public_name foobar.com -out foobar3.com.ech
$ echo -e "experimental-mode on; add ssl ech frontend1/@haproxy.conf:19 <<%EOF%\n$(cat foobar3.com.ech)\n%EOF%\n" | \
socat /tmp/haproxy.sock -
added a new ECH config to frontend1
add ssl jwt <filename>
Add an already loaded certificate to the list of certificates that can be
used for JWT validation (see "jwt_verify_cert" converter). This command does
not work on ongoing transactions.
See also "del ssl jwt" and "show ssl jwt" commands.
See "jwt" certificate option for more information.
clear counters
Clear the max values of the statistics counters in each proxy (frontend &
backend) and in each server. The accumulated counters are not affected. The
@ -2114,11 +2057,10 @@ del ssl ca-file <cafile>
the "ca-file" or "ca-verify-file" directives in the configuration.
del ssl cert <certfile>
Delete a certificate store from HAProxy. The certificate must be unused
(included for JWT validation) and removed from any crt-list or directory.
"show ssl cert" displays the status of the certificate. The deletion doesn't
work with a certificate referenced directly with the "crt" directive in the
configuration.
Delete a certificate store from HAProxy. The certificate must be unused and
removed from any crt-list or directory. "show ssl cert" displays the status
of the certificate. The deletion doesn't work with a certificate referenced
directly with the "crt" directive in the configuration.
del ssl crl-file <crlfile>
Delete a CRL file tree entry from HAProxy. The CRL file must be unused and
@ -2132,46 +2074,12 @@ del ssl crt-list <filename> <certfile[:line]>
you will need to provide which line you want to delete. To display the line
numbers, use "show ssl crt-list -n <crtlist>".
del ssl ech <bind>
Delete the ECH keys of a bind line.
The bind line format is <frontend>/@<filename>:<linenum> (Example:
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
with the "name" keyword.
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
compiled with USE_ECH=1. This command is only supported on a CLI connection
running in experimental mode (see "experimental-mode on").
See also "show ssl ech", "add ssl ech" and "ech" in the Section 5.1 of the
configuration manual.
Example:
$ echo "experimental-mode on; del ssl ech frontend1/@haproxy.conf:19" | socat /tmp/haproxy.sock -
deleted all ECH configs from frontend1/@haproxy.conf:19
del ssl jwt <filename>
Remove an already loaded certificate from the list of certificates that can be
used for JWT validation (see "jwt_verify_cert" converter). This command does
not work on ongoing transactions.
See also "add ssl jwt" and "show ssl jwt" commands.
See "jwt" certificate option for more information.
del server <backend>/<server>
Delete a removable server attached to the backend <backend>. A removable
server is the server which satisfies all of these conditions :
- not referenced by other configuration elements
- must already be in maintenance (see "disable server")
- must not have any active or idle connections
If any of these conditions is not met, the command will fail.
Active connections are those with at least one ongoing request. It is
possible to speed up their termination using "shutdown sessions server". It
is highly recommended to use "wait srv-removable" before "del server" to
ensure that all active or idle connections are closed and that the command
succeeds.
Remove a server attached to the backend <backend>. All servers are eligible,
except servers which are referenced by other configuration elements. The
server must be put in maintenance mode prior to its deletion. The operation
is cancelled if the server still has active or idle connection or its
connection queue is not empty.
disable agent <backend>/<server>
Mark the auxiliary agent check as temporarily stopped.
@ -2474,11 +2382,6 @@ prompt [help | n | i | p | timed]*
advanced scripts, and the non-interactive mode (default) to basic scripts.
Note that the non-interactive mode is not available for the master socket.
publish backend <backend>
Activates content switching to a backend instance. This is the reverse
operation of "unpublish backend" command. This command is restricted and can
only be issued on sockets configured for levels "operator" or "admin".
quit
Close the connection when in interactive mode.
@ -2704,28 +2607,6 @@ set ssl crl-file <crlfile> <payload>
socat /var/run/haproxy.stat -
echo "commit ssl crl-file crlfile.pem" | socat /var/run/haproxy.stat -
set ssl ech <bind> <payload>
Replace the ECH keys of a bind line with this one. The payload must be in the
PEM for ECH format.
(https://datatracker.ietf.org/doc/html/draft-farrell-tls-pemesni)
The bind line format is <frontend>/@<filename>:<linenum> (Example:
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
with the "name" keyword.
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
compiled with USE_ECH=1. This command is only supported on a CLI connection
running in experimental mode (see "experimental-mode on").
See also "show ssl ech", "add ssl ech" and "ech" in the Section 5.1 of the
configuration manual.
$ openssl ech -public_name foobar.com -out foobar3.com.ech
$ echo -e "experimental-mode on;
set ssl ech frontend1/@haproxy.conf:19 <<%EOF%\n$(cat foobar3.com.ech)\n%EOF%\n" | \
socat /tmp/haproxy.sock -
set new ECH configs for frontend1/@haproxy.conf:19
set ssl ocsp-response <response | payload>
This command is used to update an OCSP Response for a certificate (see "crt"
on "bind" lines). Same controls are performed as during the initial loading of
@ -2847,13 +2728,6 @@ operator
increased. It also drops expert and experimental mode. See also "show cli
level".
unpublish backend <backend>
Marks the backend as unqualified for future traffic selection. In effect,
use_backend / default_backend rules which reference it are ignored and the
next content switching rules are evaluated. Contrary to disabled backends,
servers health checks remain active. This command is restricted and can only
be issued on sockets configured for levels "operator" or "admin".
user
Decrease the CLI level of the current CLI session to user. It can't be
increased. It also drops expert and experimental mode. See also "show cli
@ -3354,10 +3228,9 @@ show quic [<format>] [<filter>]
in the format will instead show a more detailed help message.
The final argument is used to restrict or extend the connection list. By
default, active frontend connections only are displayed. Use the extra
argument "clo" to list instead closing frontend connections, "be" for backend
connections or "all" for every categories. It's also possible to restrict to
a single connection by specifying its hexadecimal address.
default, connections on closing or draining state are not displayed. Use the
extra argument "all" to include them in the output. It's also possible to
restrict to a single connection by specifying its hexadecimal address.
show servers conn [<backend>]
Dump the current and idle connections state of the servers belonging to the
@ -3378,12 +3251,9 @@ show servers conn [<backend>]
port Server's port (or zero if none)
- Unused field, serves as a visual delimiter
purge_delay Interval between connection purges, in milliseconds
served Number of connections currently in use
used_cur Number of connections currently in use
note that this excludes conns attached to a session
used_max Highest value of used_cur since the process started
need_est Floating estimate of total needed connections
idle_sess Number of idle connections flagged as private
unsafe_nb Number of idle connections considered as "unsafe"
safe_nb Number of idle connections considered as "safe"
idle_lim Configured maximum number of idle connections
@ -3869,66 +3739,6 @@ show ssl crt-list [-n] [<filename>]
ecdsa.pem:3 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3] localhost !www.test1.com
ecdsa.pem:4 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3]
show ssl ech [<name>]
Display the list of ECH keys loaded in the HAProxy process.
When <name> is specified, displays the keys for a specific bind line. The
bind line format is <frontend>/@<filename>:<linenum> (Example:
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
with the "name" keyword.
The 'age' entry represents the time, in seconds, since the key was loaded in
the bind line. This value is reset when HAProxy is started, reloaded, or
restarted.
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
compiled with USE_ECH=1.
This command is only supported on a CLI connection running in experimental
mode (see "experimental-mode on").
See also "ech" in the Section 5.1 of the configuration manual.
Example:
$ echo "experimental-mode on; show ssl ech" | socat /tmp/haproxy.sock -
***
frontend: frontend1
bind: frontend1/@haproxy.conf:19
ECH entry: 0 public_name: example.com age: 557 (has private key)
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
ECH entry: 1 public_name: example.com age: 557 (has private key)
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
bind: frontend1/@haproxy.conf:20
ECH entry: 0 public_name: example.com age: 557 (has private key)
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
ECH entry: 1 public_name: example.com age: 557 (has private key)
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
$ echo "experimental-mode on; show ssl ech frontend1/@haproxy.conf:19" | socat /tmp/haproxy.sock -
***
ECH for frontend1/@haproxy.conf:19
ECH entry: 0 public_name: example.com age: 786 (has private key)
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
ECH entry: 1 public_name: example.com age: 786 (has private key)
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
show ssl jwt
Display the list of certificates that can be used for JWT validation.
See also "add ssl jwt" and "del ssl jwt" commands.
See "jwt" certificate option for more information.
Example:
echo "show ssl jwt" | socat /tmp/sock1 -
#filename
jwt.pem
show ssl ocsp-response [[text|base64] <id|path>]
Display the IDs of the OCSP tree entries corresponding to all the OCSP
responses used in HAProxy, as well as the corresponding frontend
@ -4277,10 +4087,6 @@ shutdown sessions server <backend>/<server>
maintenance mode, for instance. Such terminated streams are reported with a
'K' flag in the logs.
Backend connections are left in idle state, unless the server is already in
maintenance mode, in which case they will be immediately scheduled for
deletion.
trace
The "trace" command alone lists the trace sources, their current status, and
their brief descriptions. It is only meant as a menu to enter next levels,
@ -4495,13 +4301,12 @@ wait { -h | <delay> } [<condition> [<args>...]]
unsatisfied for the whole <delay> duration. The supported conditions are:
- srv-removable <proxy>/<server> : this will wait for the specified server to
be removable by the "del server" command, i.e. be in maintenance and no
longer have any connection on it (neither active or idle). Some conditions
will never be accepted (e.g. not in maintenance) and will cause the report
of a specific error message indicating what condition is not met. The
server might even have been removed in parallel and no longer exist. If
everything is OK before the delay, a success is returned and the operation
is terminated.
be removable, i.e. be in maintenance and no longer have any connection on
it. Some conditions will never be accepted (e.g. not in maintenance) and
will cause the report of a specific error message indicating what condition
is not met. The server might even have been removed in parallel and no
longer exist. If everything is OK before the delay, a success is returned
and the operation is terminated.
The default unit for the delay is milliseconds, though other units are
accepted if suffixed with the usual timer units (us, ms, s, m, h, d). When
@ -4552,11 +4357,6 @@ Example:
case the full command ends at the end of line or semi-colon like any regular
command.
Bugs: the sockpair@ protocol used to implement communication between the
master and the worker is known to not be reliable on macOS because of an
issue in the macOS sendmsg(2) implementation. A command might end up without
response because of that.
Examples:
$ socat /var/run/haproxy-master.sock readline
@ -4623,11 +4423,6 @@ Example:
command). In this case, the prompt mode of the master socket (interactive,
prompt, timed) is propagated into the worker process.
Bugs: the sockpair@ protocol used to implement communication between the
master and the worker is known to not be reliable on macOS because of an
issue in the macOS sendmsg(2) implementation. A command might end up without
response because of that.
Examples:
# gracefully close connections and delete a server once idle (wait max 10s)
$ socat -t 11 /var/run/haproxy-master.sock - <<< \

View File

@ -28,8 +28,7 @@ Revision history
string encoding. With contributions from Andriy Palamarchuk
(Amazon.com).
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
2025/09/09 - added SSL-related TLVs for key exchange group and signature
scheme (Steven Collison)
1. Background
@ -536,20 +535,18 @@ the information they choose to publish.
The following types have already been registered for the <type> field :
#define PP2_TYPE_ALPN 0x01
#define PP2_TYPE_AUTHORITY 0x02
#define PP2_TYPE_CRC32C 0x03
#define PP2_TYPE_NOOP 0x04
#define PP2_TYPE_UNIQUE_ID 0x05
#define PP2_TYPE_SSL 0x20
#define PP2_SUBTYPE_SSL_VERSION 0x21
#define PP2_SUBTYPE_SSL_CN 0x22
#define PP2_SUBTYPE_SSL_CIPHER 0x23
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
#define PP2_SUBTYPE_SSL_GROUP 0x26
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
#define PP2_TYPE_NETNS 0x30
#define PP2_TYPE_ALPN 0x01
#define PP2_TYPE_AUTHORITY 0x02
#define PP2_TYPE_CRC32C 0x03
#define PP2_TYPE_NOOP 0x04
#define PP2_TYPE_UNIQUE_ID 0x05
#define PP2_TYPE_SSL 0x20
#define PP2_SUBTYPE_SSL_VERSION 0x21
#define PP2_SUBTYPE_SSL_CN 0x22
#define PP2_SUBTYPE_SSL_CIPHER 0x23
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
#define PP2_TYPE_NETNS 0x30
2.2.1 PP2_TYPE_ALPN
@ -657,21 +654,13 @@ of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
of the algorithm used to sign the certificate presented by the frontend when
the incoming connection was made over an SSL/TLS transport layer, for example
"RSA-SHA256".
"SHA256".
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
of the algorithm used to generate the key of the certificate presented by the
frontend when the incoming connection was made over an SSL/TLS transport layer,
for example "RSA2048".
The second level TLV PP2_SUBTYPE_SSL_GROUP provides the US-ASCII string name of
the key exchange algorithm used for the frontend TLS connection, for example
"secp256r1".
The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
name of the algorithm the frontend used to sign the ServerKeyExchange or
CertificateVerify message, for example "rsa_pss_rsae_sha256".
In all cases, the string representation (in UTF8) of the Common Name field
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".

View File

@ -12,7 +12,6 @@ struct acme_cfg {
char *filename; /* config filename */
int linenum; /* config linenum */
char *name; /* section name */
int reuse_key; /* do we need to renew the private key */
char *directory; /* directory URL */
char *map; /* storage for tokens + thumbprint */
struct {
@ -28,8 +27,6 @@ struct acme_cfg {
int curves; /* NID of curves */
} key;
char *challenge; /* HTTP-01, DNS-01, etc */
char *vars; /* variables put in the dpapi sink */
char *provider; /* DNS provider put in the dpapi sink */
struct acme_cfg *next;
};
@ -85,8 +82,7 @@ struct acme_ctx {
struct ist finalize;
struct ist certificate;
struct task *task;
struct ebmb_node node;
char name[VAR_ARRAY];
struct mt_list el;
};
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */

View File

@ -4,9 +4,6 @@
#include <haproxy/ssl_ckch-t.h>
int ckch_conf_acme_init(void *value, char *buf, struct ckch_store *s, int cli, const char *filename, int linenum, char **err);
EVP_PKEY *acme_gen_tmp_pkey();
X509 *acme_gen_tmp_x509();
int ckch_conf_acme_init(void *value, char *buf, struct ckch_data *d, int cli, const char *filename, int linenum, char **err);
#endif

View File

@ -76,12 +76,12 @@ struct memprof_stats {
const void *caller;
enum memprof_method method;
/* 4-7 bytes hole here */
unsigned long long locked_calls;
unsigned long long alloc_calls;
unsigned long long free_calls;
unsigned long long alloc_tot;
unsigned long long free_tot;
void *info; // for pools, ptr to the pool
void *pad; // pad to 64
};
#endif
@ -125,8 +125,8 @@ struct activity {
unsigned int ctr2; // general purpose debug counter
#endif
char __pad[0]; // unused except to check remaining room
char __end[0] THREAD_ALIGNED();
} THREAD_ALIGNED();
char __end[0] __attribute__((aligned(64))); // align size to 64.
};
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
#define SCHED_ACT_HASH_BITS 8
@ -143,10 +143,7 @@ struct sched_activity {
uint64_t calls;
uint64_t cpu_time;
uint64_t lat_time;
uint64_t lkw_time; /* lock waiting time */
uint64_t lkd_time; /* locked time */
uint64_t mem_time; /* memory ops wait time */
} THREAD_ALIGNED();
};
#endif /* _HAPROXY_ACTIVITY_T_H */

View File

@ -47,7 +47,7 @@
#define APPCTX_FL_ERROR 0x00000080
#define APPCTX_FL_SHUTDOWN 0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
#define APPCTX_FL_WANT_DIE 0x00000200 /* applet was running and requested to die */
/* unused: 0x00000400 */
#define APPCTX_FL_INOUT_BUFS 0x00000400 /* applet uses its own buffers */
#define APPCTX_FL_FASTFWD 0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
#define APPCTX_FL_IN_MAYALLOC 0x00001000 /* applet may try again to allocate its inbuf */
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */
@ -73,8 +73,8 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
_(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
_(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
_(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE,
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC)))))))))))));
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE, _(APPCTX_FL_INOUT_BUFS,
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC))))))))))))));
/* epilogue */
_(~0U);
return buf;
@ -83,7 +83,6 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
#define APPLET_FL_WARNED 0x00000002 /* Set when warning was already emitted about a legacy applet */
#define APPLET_FL_HTX 0x00000004 /* Set if the applet is using HTX buffers */
/* Applet descriptor */
struct applet {

View File

@ -62,12 +62,6 @@ ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs,
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
static forceinline uint appctx_app_test(const struct appctx *appctx, uint test)
{
return (appctx->applet->flags & test);
}
static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
{
return appctx_new_on(applet, sedesc, tid);
@ -294,7 +288,7 @@ static inline void applet_expect_data(struct appctx *appctx)
*/
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) || !appctx_get_buf(appctx, &appctx->inbuf))
return NULL;
return &appctx->inbuf;
@ -309,7 +303,7 @@ static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
*/
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC|APPCTX_FL_OUTBLK_FULL) ||
!appctx_get_buf(appctx, &appctx->outbuf))
return NULL;
@ -319,46 +313,22 @@ static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
return sc_ib(appctx_sc(appctx));
}
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_htx_input_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_used_space(htxbuf(&appctx->inbuf));
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_input_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_input_data(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return b_data(&appctx->inbuf);
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Returns the amount of HTX data in the output buffer (see applet_get_outbuf) */
static inline size_t applet_htx_output_data(const struct appctx *appctx)
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_htx_input_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_used_space(htxbuf(&appctx->outbuf));
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return htx_used_space(htxbuf(&appctx->inbuf));
else
return ci_data(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of data in the output buffer (see applet_get_outbuf) */
static inline size_t applet_output_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_output_data(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_data(&appctx->outbuf);
else
return ci_data(sc_ic(appctx_sc(appctx)));
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
@ -366,13 +336,11 @@ static inline size_t applet_output_data(const struct appctx *appctx)
* This is useful when data have been read directly from the buffer. It is
* illegal to call this function with <len> causing a wrapping at the end of the
* buffer. It's the caller's responsibility to ensure that <len> is never larger
* than available output data.
*
* This function is not HTX aware.
* than available output data.
*/
static inline void applet_skip_input(struct appctx *appctx, size_t len)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
b_del(&appctx->inbuf, len);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
@ -384,7 +352,7 @@ static inline void applet_skip_input(struct appctx *appctx, size_t len)
*/
static inline void applet_reset_input(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
b_reset(&appctx->inbuf);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
@ -392,25 +360,22 @@ static inline void applet_reset_input(struct appctx *appctx)
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
}
/* Returns the amount of space available at the HTX output buffer (see applet_get_outbuf).
/* Returns the amount of space available at the output buffer (see applet_get_outbuf).
*/
static inline size_t applet_htx_output_room(const struct appctx *appctx)
static inline size_t applet_output_room(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_free_data_space(htxbuf(&appctx->outbuf));
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return b_room(&appctx->outbuf);
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of space available at the output buffer (see applet_get_outbuf).
/* Returns the amount of space available at the HTX output buffer (see applet_get_outbuf).
*/
static inline size_t applet_output_room(const struct appctx *appctx)
static inline size_t applet_htx_output_room(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_output_room(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_room(&appctx->outbuf);
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
return htx_free_data_space(htxbuf(&appctx->outbuf));
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
@ -425,7 +390,7 @@ static inline size_t applet_output_room(const struct appctx *appctx)
*/
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
applet_have_more_data(appctx);
else
sc_need_room(appctx_sc(appctx), room_needed);
@ -437,7 +402,7 @@ static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (unlikely(stress) ?
b_data(&appctx->outbuf) :
b_data(chunk) > b_room(&appctx->outbuf)) {
@ -492,7 +457,7 @@ static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (len > b_room(&appctx->outbuf)) {
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
ret = -1;
@ -528,7 +493,7 @@ static inline int applet_putstr(struct appctx *appctx, const char *str)
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
int len = strlen(str);
if (len > b_room(&appctx->outbuf)) {
@ -564,7 +529,7 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (b_full(&appctx->outbuf)) {
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
ret = -1;
@ -593,7 +558,7 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
static inline int applet_may_get(const struct appctx *appctx, size_t len)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (len > b_data(&appctx->inbuf)) {
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
return -1;
@ -628,7 +593,7 @@ static inline int applet_getchar(const struct appctx *appctx, char *c)
ret = applet_may_get(appctx, 1);
if (ret <= 0)
return ret;
*c = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
*c = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
? *(b_head(&appctx->inbuf))
: *(co_head(sc_oc(appctx_sc(appctx)))));
@ -657,7 +622,7 @@ static inline int applet_getblk(const struct appctx *appctx, char *blk, int len,
if (ret <= 0)
return ret;
buf = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
buf = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
? &appctx->inbuf
: sc_ob(appctx_sc(appctx)));
return b_getblk(buf, blk, len, offset);
@ -689,7 +654,7 @@ static inline int applet_getword(const struct appctx *appctx, char *str, int len
if (ret <= 0)
goto out;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
buf = &appctx->inbuf;
input = b_data(buf);
}
@ -716,7 +681,7 @@ static inline int applet_getword(const struct appctx *appctx, char *str, int len
p = b_next(buf, p);
}
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (ret < len && (ret < input || b_room(buf)) &&
!se_fl_test(appctx->sedesc, SE_FL_SHW))
ret = 0;
@ -776,7 +741,7 @@ static inline int applet_getblk_nc(const struct appctx *appctx, const char **blk
if (ret <= 0)
return ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
buf = &appctx->inbuf;
max = b_data(buf);
}
@ -832,7 +797,7 @@ static inline int applet_getword_nc(const struct appctx *appctx, const char **bl
* the resulting string is made of the concatenation of the pending
* blocks (1 or 2).
*/
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
return ret;
}

View File

@ -46,8 +46,6 @@ int alloc_bind_address(struct sockaddr_storage **ss,
struct server *srv, struct proxy *be,
struct stream *s);
int be_reuse_mode(const struct proxy *be, const struct server *srv);
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
struct session *sess,
struct sockaddr_storage *src,
@ -85,21 +83,10 @@ static inline int be_usable_srv(struct proxy *be)
return be->srv_bck;
}
/* Returns true if <be> backend can be used as target to a switching rules. */
static inline int be_is_eligible(const struct proxy *be)
{
/* A disabled or unpublished backend cannot be selected for traffic.
* Note that STOPPED state is ignored as there is a risk of breaking
* requests during soft-stop.
*/
return !(be->flags & (PR_FL_DISABLED|PR_FL_BE_UNPUBLISHED));
}
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
if (be->be_counters.shared.tg)
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* This function returns non-zero if the designated server will be
@ -179,12 +166,6 @@ void set_backend_down(struct proxy *be);
unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
/* Returns true if connection reuse is supported by <be> backend. */
static inline int be_supports_conn_reuse(const struct proxy *be)
{
return be->mode == PR_MODE_HTTP || be->mode == PR_MODE_SPOP;
}
#endif /* _HAPROXY_BACKEND_H */
/*

View File

@ -40,23 +40,6 @@
#define DPRINTF(x...)
#endif
/* Let's make DEBUG_STRESS equal to zero if not set or not valid, or to
* 1 if set. This way it is always set and should be easy to use in "if ()"
* statements without requiring ifdefs, while remaining compatible with
* "#if DEBUG_STRESS > 0". We also force DEBUG_STRICT and DEBUG_STRICT_ACTION
* when stressed.
*/
#if !defined(DEBUG_STRESS)
# define DEBUG_STRESS 0
#elif DEBUG_STRESS != 0
# undef DEBUG_STRESS
# define DEBUG_STRESS 1 // make sure comparison >0 always works
# undef DEBUG_STRICT
# define DEBUG_STRICT 2 // enable BUG_ON
# undef DEBUG_STRICT_ACTION
# define DEBUG_STRICT_ACTION 3 // enable crash on match
#endif
#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
/* First, let's try to handle some arch-specific crashing methods. We prefer
@ -424,20 +407,6 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
# define COUNT_IF_HOT(cond, ...) DISGUISE(cond)
#endif
/* turn BUG_ON_STRESS() into a real statement when DEBUG_STRESS is set,
* otherwise simply ignore it, at the risk of failing to notice if the
* condition would build at all. We don't really care if BUG_ON_STRESS
* doesn't always build, because it's meant to be used only in certain
* scenarios, possibly requiring certain combinations of options. We
* just want to be certain that the condition is not implemented at all
* when not used, so as to encourage developers to put a lot of them at
* zero cost.
*/
#if DEBUG_STRESS > 0
# define BUG_ON_STRESS(cond, ...) BUG_ON(cond, __VA_ARGS__)
#else
# define BUG_ON_STRESS(cond, ...) do { } while (0)
#endif
/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
#if defined(__GNUC__) && defined(__OPTIMIZE__)
@ -537,7 +506,7 @@ struct mem_stats {
size_t size;
struct ha_caller caller;
const void *extra; // extra info specific to this call (e.g. pool ptr)
} ALIGNED(sizeof(void*));
} __attribute__((aligned(sizeof(void*))));
#undef calloc
#define calloc(x,y) ({ \
@ -670,24 +639,6 @@ struct mem_stats {
_ha_aligned_alloc(__a, __s); \
})
#undef ha_aligned_zalloc
#define ha_aligned_zalloc(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_zalloc(__a, __s); \
})
#undef ha_aligned_alloc_safe
#define ha_aligned_alloc_safe(a,s) ({ \
size_t __a = (a); \
@ -706,64 +657,6 @@ struct mem_stats {
_ha_aligned_alloc_safe(__a, __s); \
})
#undef ha_aligned_zalloc_safe
#define ha_aligned_zalloc_safe(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_zalloc_safe(__a, __s); \
})
// Since the type is known, the .extra field will contain its name
#undef ha_aligned_alloc_typed
#define ha_aligned_alloc_typed(cnt,type) ({ \
size_t __a = __alignof__(type); \
size_t __s = ((size_t)cnt) * sizeof(type); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
.extra = #type, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
(type*)_ha_aligned_alloc(__a, __s); \
})
// Since the type is known, the .extra field will contain its name
#undef ha_aligned_zalloc_typed
#define ha_aligned_zalloc_typed(cnt,type) ({ \
size_t __a = __alignof__(type); \
size_t __s = ((size_t)cnt) * sizeof(type); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
.extra = #type, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
(type*)_ha_aligned_zalloc_safe(__a, __s); \
})
#undef ha_aligned_free
#define ha_aligned_free(x) ({ \
typeof(x) __x = (x); \
@ -810,11 +703,7 @@ struct mem_stats {
#define will_free(x, y) do { } while (0)
#define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
#define ha_aligned_zalloc(a,s) _ha_aligned_zalloc(a, s)
#define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
#define ha_aligned_zalloc_safe(a,s) _ha_aligned_zalloc_safe(a, s)
#define ha_aligned_alloc_typed(cnt,type) ((type*)_ha_aligned_alloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
#define ha_aligned_zalloc_typed(cnt,type) ((type*)_ha_aligned_zalloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
#define ha_aligned_free(p) _ha_aligned_free(p)
#define ha_aligned_free_size(p,s) _ha_aligned_free(p)

View File

@ -54,8 +54,6 @@ enum cond_predicate {
CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
CFG_PRED_AWSLC_API_ATLEAST, // "awslc_api_atleast"
CFG_PRED_AWSLC_API_BEFORE, // "awslc_api_before"
CFG_PRED_ENABLED, // "enabled"
};

View File

@ -140,7 +140,7 @@ int warnif_misplaced_tcp_req_sess(struct proxy *proxy, const char *file, int lin
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, char **err);
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
int too_many_args(int maxarg, char **args, char **msg, int *err_code);

View File

@ -204,6 +204,7 @@ struct channel {
unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
unsigned char xfer_large; /* number of consecutive large xfers */
unsigned char xfer_small; /* number of consecutive small xfers */
unsigned long long total; /* total data read */
int analyse_exp; /* expiration date for current analysers (if set) */
};

View File

@ -323,6 +323,7 @@ static inline void channel_init(struct channel *chn)
chn->to_forward = 0;
chn->last_read = now_ms;
chn->xfer_small = chn->xfer_large = 0;
chn->total = 0;
chn->analysers = 0;
chn->flags = 0;
chn->output = 0;
@ -376,6 +377,7 @@ static inline void channel_add_input(struct channel *chn, unsigned int len)
c_adv(chn, fwd);
}
/* notify that some data was read */
chn->total += len;
chn->flags |= CF_READ_EVENT;
}

View File

@ -47,7 +47,6 @@
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
#define APPCTX_CLI_ST1_YIELD (1 << 6) /* forced yield between commands */
#define CLI_PREFIX_KW_NB 5
#define CLI_MAX_MATCHES 5

View File

@ -49,8 +49,6 @@ uint clock_report_idle(void);
void clock_leaving_poll(int timeout, int interrupted);
void clock_entering_poll(void);
void clock_adjust_now_offset(void);
void clock_set_now_offset(llong ofs);
llong clock_get_now_offset(void);
static inline void clock_update_date(int max_wait, int interrupted)
{

View File

@ -31,23 +31,6 @@
#include <stdlib.h>
#endif
/* DEFVAL() returns either the second argument as-is, or <def> if absent. This
* is for use in macros arguments.
*/
#define DEFVAL(_def,...) _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
* use in macros arguments.
*/
#define DEFNULL(...) DEFVAL(NULL, ##__VA_ARGS__)
/* DEFZERO() returns either the argument as-is, or 0 if absent. This is for
* use in macros arguments.
*/
#define DEFZERO(...) DEFVAL(0, ##__VA_ARGS__)
#define _FIRST_ARG(a, b, ...) b
/*
* Gcc before 3.0 needs [0] to declare a variable-size array
*/
@ -432,13 +415,6 @@
* for multi_threading, see THREAD_PAD() below. *
\*****************************************************************************/
/* Cache line size for alignment purposes. This value is incorrect for some
* Apple CPUs which have 128 bytes cache lines.
*/
#ifndef CACHELINE_SIZE
#define CACHELINE_SIZE 64
#endif
/* sets alignment for current field or variable */
#ifndef ALIGNED
#define ALIGNED(x) __attribute__((aligned(x)))
@ -462,12 +438,12 @@
#endif
#endif
/* Sets alignment for current field or variable only when threads are enabled.
* When no parameters are provided, we align to the cache line size.
/* sets alignment for current field or variable only when threads are enabled.
* Typically used to respect cache line alignment to avoid false sharing.
*/
#ifndef THREAD_ALIGNED
#ifdef USE_THREAD
#define THREAD_ALIGNED(...) ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
#else
#define THREAD_ALIGNED(x)
#endif
@ -500,44 +476,32 @@
#endif
#endif
/* Add an optional alignment for next fields in a structure, only when threads
* are enabled. When no parameters are provided, we align to the cache line size.
/* add an optional alignment for next fields in a structure, only when threads
* are enabled. Typically used to respect cache line alignment to avoid false
* sharing.
*/
#ifndef THREAD_ALIGN
#ifdef USE_THREAD
#define THREAD_ALIGN(...) union { } ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
#define THREAD_ALIGN(x) union { } ALIGNED(x)
#else
#define THREAD_ALIGN(x)
#endif
#endif
/* add padding of the specified size */
#define _PAD(x,l) char __pad_##l[x]
/* add optional padding of the specified size between fields in a structure,
* only when threads are enabled. This is used to avoid false sharing of cache
* lines for dynamically allocated structures which cannot guarantee alignment.
*/
#ifndef THREAD_PAD
# ifdef USE_THREAD
# define _THREAD_PAD(x,l) _PAD(x, l)
# define __THREAD_PAD(x,l) char __pad_##l[x]
# define _THREAD_PAD(x,l) __THREAD_PAD(x, l)
# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
# else
# define THREAD_PAD(x)
# endif
#endif
/* add mandatory padding of the specified size between fields in a structure,
* This is used to avoid false sharing of cache lines for dynamically allocated
* structures which cannot guarantee alignment, or to ensure that the size of
* the struct remains consistent on architectures with different alignment
* constraints
*/
#ifndef ALWAYS_PAD
# define _ALWAYS_PAD(x,l) _PAD(x, l)
# define ALWAYS_PAD(x) _ALWAYS_PAD(x, __LINE__)
#endif
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
* to __thread when threads are enabled or empty when disabled.
*/

View File

@ -28,7 +28,7 @@
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <import/cebtree.h>
#include <import/ebtree-t.h>
#include <import/ist.h>
#include <haproxy/api-t.h>
@ -144,8 +144,9 @@ enum {
*/
CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
CO_FL_WANT_SPLICING = 0x00001000, /* we wish to use splicing on the connection when possible */
CO_FL_SSL_NO_CACHED_INFO = 0x00002000, /* Don't use any cached information when creating a new SSL connection */
/* These flags are used to report whether the from/to addresses are set or not */
/* unused: 0x00001000 */
/* unused: 0x00002000 */
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
@ -212,13 +213,13 @@ static forceinline char *conn_show_flags(char *buf, size_t len, const char *deli
/* flags */
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_SSL_NO_CACHED_INFO, _(CO_FL_EARLY_SSL_HS,
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_EARLY_SSL_HS,
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
_(CO_FL_XPRT_TRACKED
)))))))))))))))))))))))))))));
))))))))))))))))))))))))))));
/* epilogue */
_(~0U);
return buf;
@ -329,7 +330,6 @@ enum {
CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */
CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */
CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */
CO_RFL_TRY_HARDER = 0x0040, /* Try to read till READ0 even on short reads */
};
/* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */
@ -433,24 +433,14 @@ union conn_handle {
int fd; /* file descriptor, for regular sockets (CO_FL_FDLESS=0) */
};
enum xprt_capabilities {
XPRT_CAN_SPLICE,
};
enum xprt_splice_cap {
XPRT_CONN_CAN_NOT_SPLICE, /* This connection can't, and won't ever be able to splice */
XPRT_CONN_COULD_SPLICE, /* This connection can't splice, but may later */
XPRT_CONN_CAN_SPLICE /* This connection can splice */
};
/* xprt_ops describes transport-layer operations for a connection. They
* generally run over a socket-based control layer, but not always. Some
* of them are used for data transfer with the upper layer (rcv_*, snd_*)
* and the other ones are used to setup and release the transport layer.
*/
struct xprt_ops {
size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, void *msg_control, size_t *msg_controllen, int flags); /* recv callback */
size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, void *msg_control, size_t msg_controllen, int flags); /* send callback */
size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags); /* recv callback */
size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags); /* send callback */
int (*rcv_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */
int (*snd_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* send-to-pipe callback */
void (*shutr)(struct connection *conn, void *xprt_ctx, int); /* shutr function */
@ -474,12 +464,6 @@ struct xprt_ops {
struct ssl_sock_ctx *(*get_ssl_sock_ctx)(struct connection *); /* retrieve the ssl_sock_ctx in use, or NULL if none */
int (*show_fd)(struct buffer *, const struct connection *, const void *ctx); /* append some data about xprt for "show fd"; returns non-zero if suspicious */
void (*dump_info)(struct buffer *, const struct connection *);
/*
* Returns the value for various capabilities.
* Returns 0 if the capability is known, with the actual value in arg,
* or -1 otherwise
*/
int (*get_capability)(struct connection *connection, void *xprt_ctx, enum xprt_capabilities, void *arg);
};
/* mux_ops describes the mux operations, which are to be performed at the
@ -567,7 +551,7 @@ enum conn_hash_params_t {
#define CONN_HASH_PARAMS_TYPE_COUNT 7
#define CONN_HASH_PAYLOAD_LEN \
(((sizeof(((struct conn_hash_node *)0)->key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
(((sizeof(((struct conn_hash_node *)0)->node.key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
#define CONN_HASH_GET_PAYLOAD(hash) \
(((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
@ -599,14 +583,6 @@ struct conn_tlv_list {
} __attribute__((packed));
/* node for backend connection in the idle trees for http-reuse
* A connection is identified by a hash generated from its specific parameters
*/
struct conn_hash_node {
struct ceb_node node; /* indexes the hashing key for safe/idle/avail */
uint64_t key; /* the hashing key, also used by session-owned */
};
/* This structure describes a connection with its methods and data.
* A connection may be performed to proxy or server via a local or remote
* socket, and can also be made to an internal applet. It can support
@ -631,14 +607,12 @@ struct connection {
/* second cache line */
struct wait_event *subs; /* Task to wake when awaited events are ready */
union {
/* Backend connections only */
struct {
struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
struct list idle_list; /* list element for idle connection in server idle list */
struct list sess_el; /* used by private connections, list elem into session */
};
/* Frontend connections only */
struct list stopping_list; /* attach point in mux stopping list */
struct list idle_list; /* list element for idle connection in server idle list */
struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
};
union {
struct list sess_el; /* used by private backend conns, list elem into session */
struct list stopping_list; /* used by frontend conns, attach point in mux stopping list */
};
union conn_handle handle; /* connection handle at the socket layer */
const struct netns_entry *proxy_netns;
@ -652,7 +626,7 @@ struct connection {
/* used to identify a backend connection for http-reuse,
* thus only present if conn.target is of type OBJ_TYPE_SERVER
*/
struct conn_hash_node hash_node;
struct conn_hash_node *hash_node;
/* Members used if connection must be reversed. */
struct {
@ -660,18 +634,24 @@ struct connection {
struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
} reverse;
uint64_t sni_hash; /* Hash of the SNI. Used to cache the TLS session and try to reuse it. set to 0 is there is no SNI */
uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */
uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */
uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */
};
/* node for backend connection in the idle trees for http-reuse
* A connection is identified by a hash generated from its specific parameters
*/
struct conn_hash_node {
struct eb64_node node; /* contains the hashing key */
struct connection *conn; /* connection owner of the node */
};
struct mux_proto_list {
const struct ist token; /* token name and length. Empty is catch-all */
enum proto_proxy_mode mode;
enum proto_proxy_side side;
const struct mux_ops *mux;
const char *alpn; /* Default alpn to set by default when the mux protocol is forced (optional, in binary form) */
struct list list;
};
@ -795,7 +775,7 @@ struct idle_conns {
struct mt_list toremove_conns;
struct task *cleanup_task;
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
/* Termination events logs:

View File

@ -39,6 +39,7 @@
#include <haproxy/task-t.h>
extern struct pool_head *pool_head_connection;
extern struct pool_head *pool_head_conn_hash_node;
extern struct pool_head *pool_head_sockaddr;
extern struct pool_head *pool_head_pp_tlv_128;
extern struct pool_head *pool_head_pp_tlv_256;
@ -83,13 +84,14 @@ int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess
const struct mux_ops *force_mux_ops);
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
void conn_delete_from_tree(struct connection *conn, int thr);
void conn_delete_from_tree(struct connection *conn);
void conn_init(struct connection *conn, void *target);
struct connection *conn_new(void *target);
void conn_free(struct connection *conn);
void conn_release(struct connection *conn);
void conn_set_errno(struct connection *conn, int err);
struct conn_hash_node *conn_alloc_hash_node(struct connection *conn);
struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
void sockaddr_free(struct sockaddr_storage **sap);

View File

@ -33,22 +33,15 @@
struct { \
uint16_t flags; /* COUNTERS_SHARED_F flags */\
};
/* /!\ any change performed here will impact shm-stats-file mapping because the
* struct is embedded in shm_stats_file_object struct, so proceed with caution
* and change shm stats file version if needed. Also please always keep this
* struct 64b-aligned.
*/
#define COUNTERS_SHARED_TG \
struct { \
unsigned long last_state_change; /* last time, when the state was changed */\
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
long long internal_errors; /* internal processing errors */\
long long failed_rewrites; /* failed rewrites (warning) */\
long long req_in; /* number of bytes received from the client */\
long long req_out; /* number of bytes sent to the server */\
long long res_in; /* number of bytes received from the server */\
long long res_out; /* number of bytes sent to the client */\
long long bytes_out; /* number of bytes transferred from the server to the client */\
long long bytes_in; /* number of bytes transferred from the client to the server */\
long long denied_resp; /* blocked responses because of security concerns */\
long long denied_req; /* blocked requests because of security concerns */\
long long cum_sess; /* cumulated number of accepted connections */\
@ -56,9 +49,7 @@
long long comp_in[2]; /* input bytes fed to the compressor */\
long long comp_out[2]; /* output bytes emitted by the compressor */\
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */\
struct freq_ctr sess_per_sec; /* sessions per second on this server (3x32b) */\
unsigned int last_state_change; /* last time, when the state was changed (32b) */\
/* we're still 64b-aligned here */ \
struct freq_ctr sess_per_sec; /* sessions per second on this server */\
}
// for convenience (generic pointer)
@ -66,14 +57,10 @@ struct counters_shared {
COUNTERS_SHARED;
struct {
COUNTERS_SHARED_TG;
} **tg;
} *tg[MAX_TGROUPS];
};
/*
* /!\ any change performed here will impact shm-stats-file mapping because the
* struct is embedded in shm_stats_file_object struct, so proceed with caution
* and change shm stats file version if needed
*/
/* counters used by listeners and frontends */
struct fe_counters_shared_tg {
COUNTERS_SHARED_TG;
@ -97,14 +84,13 @@ struct fe_counters_shared_tg {
} p; /* protocol-specific stats */
long long failed_req; /* failed requests (eg: invalid or timeout) */
} ALIGNED(8);
};
struct fe_counters_shared {
COUNTERS_SHARED;
struct fe_counters_shared_tg **tg;
struct fe_counters_shared_tg *tg[MAX_TGROUPS];
};
/* counters used by listeners and frontends */
struct fe_counters {
struct fe_counters_shared shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */
@ -122,11 +108,6 @@ struct fe_counters {
} p; /* protocol-specific stats */
};
/* /!\ any change performed here will impact shm-stats-file mapping because the
* struct is embedded in shm_stats_file_object struct, so proceed with caution
* and change shm stats file version if needed. Pay attention to keeping the
* struct 64b-aligned.
*/
struct be_counters_shared_tg {
COUNTERS_SHARED_TG;
@ -134,6 +115,7 @@ struct be_counters_shared_tg {
long long connect; /* number of connection establishment attempts */
long long reuse; /* number of connection reuses */
unsigned long last_sess; /* last session time */
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
long long down_trans; /* up->down transitions */
@ -154,13 +136,11 @@ struct be_counters_shared_tg {
long long retries; /* retried and redispatched connections (BE only) */
long long failed_resp; /* failed responses (BE only) */
long long failed_conns; /* failed connect() attempts (BE only) */
unsigned int last_sess; /* last session time */
/* 32-bit hole here */
} ALIGNED(8);
};
struct be_counters_shared {
COUNTERS_SHARED;
struct be_counters_shared_tg **tg;
struct be_counters_shared_tg *tg[MAX_TGROUPS];
};
/* counters used by servers and backends */

View File

@ -27,8 +27,8 @@
#include <haproxy/counters-t.h>
#include <haproxy/guid-t.h>
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid, char **errmsg);
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid, char **errmsg);
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid);
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid);
void counters_fe_shared_drop(struct fe_counters_shared *counters);
void counters_be_shared_drop(struct be_counters_shared *counters);
@ -43,13 +43,11 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
*/
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
({ \
unsigned long last = 0; \
unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
unsigned long now_seconds = ns_to_sec(now_ns); \
int it; \
\
if (scounters) \
last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
for (it = 1; (it < global.nbtgroups && scounters); it++) { \
for (it = 1; it < global.nbtgroups; it++) { \
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
if ((now_seconds - cur) < (now_seconds - last)) \
last = cur; \
@ -76,7 +74,7 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
uint64_t __ret = 0; \
int it; \
\
for (it = 0; (it < global.nbtgroups && scounters); it++) \
for (it = 0; it < global.nbtgroups; it++) \
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
__ret; \
})
@ -96,7 +94,7 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
uint64_t __ret = 0; \
int it; \
\
for (it = 0; (it < global.nbtgroups && scounters); it++) \
for (it = 0; it < global.nbtgroups; it++) \
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
__ret; \
})

View File

@ -366,13 +366,6 @@
#define STATS_VERSION_STRING " version " HAPROXY_VERSION ", released " HAPROXY_DATE
#endif
/* specifies the default max number of object per thread group that the shm stats file
* will be able to handle
*/
#ifndef SHM_STATS_FILE_MAX_OBJECTS
#define SHM_STATS_FILE_MAX_OBJECTS 2000
#endif
/* This is the default statistics URI */
#ifdef CONFIG_STATS_DEFAULT_URI
#define STATS_DEFAULT_URI CONFIG_STATS_DEFAULT_URI

View File

@ -1,13 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _HAPROXY_ECH_H
# define _HAPROXY_ECH_H
#ifdef USE_ECH
#include <openssl/ech.h>
int load_echkeys(SSL_CTX *ctx, char *dirname, int *loaded);
int conn_get_ech_status(struct connection *conn, struct buffer *buf);
int conn_get_ech_outer_sni(struct connection *conn, struct buffer *buf);
# endif /* USE_ECH */
#endif /* _HAPROXY_ECH_H */

View File

@ -202,7 +202,7 @@ struct fdtab {
#ifdef DEBUG_FD
unsigned int event_count; /* number of events reported */
#endif
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
/* polled mask, one bit per thread and per direction for each FD */
struct polled_mask {

View File

@ -31,7 +31,7 @@
ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
uint freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
/* Only usable during single threaded startup phase. */

View File

@ -80,12 +80,11 @@
#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
#define GTUNE_QUICK_EXIT (1<<23)
/* (1<<24) unused */
/* (1<<25) unused */
#define GTUNE_NO_QUIC (1<<25)
#define GTUNE_USE_FAST_FWD (1<<26)
#define GTUNE_LISTENER_MQ_FAIR (1<<27)
#define GTUNE_LISTENER_MQ_OPT (1<<28)
#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
#define GTUNE_NO_KTLS (1<<29)
/* subsystem-specific debugging options for tune.debug */
#define GDBG_CPU_AFFINITY (1U<< 0)
@ -167,7 +166,6 @@ struct global {
char *server_state_base; /* path to a directory where server state files can be found */
char *server_state_file; /* path to the file where server states are loaded from */
char *stats_file; /* path to stats-file */
char *shm_stats_file; /* path to shm-stats-file */
unsigned char cluster_secret[16]; /* 128 bits of an SHA1 digest of a secret defined as ASCII string */
struct {
int maxpollevents; /* max number of poll events at once */
@ -214,6 +212,20 @@ struct global {
uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
uint ring_queues; /* if >0, #ring queues, otherwise equals #thread groups */
enum threadgroup_takeover tg_takeover; /* Policy for threadgroup takeover */
#ifdef USE_QUIC
unsigned int quic_backend_max_idle_timeout;
unsigned int quic_frontend_max_idle_timeout;
unsigned int quic_frontend_glitches_threshold;
unsigned int quic_frontend_max_data;
unsigned int quic_frontend_max_streams_bidi;
uint64_t quic_frontend_max_tx_mem;
size_t quic_frontend_max_window_size;
unsigned int quic_frontend_stream_data_ratio;
unsigned int quic_retry_threshold;
unsigned int quic_reorder_ratio;
unsigned int quic_max_frame_loss;
unsigned int quic_cubic_loss_tol;
#endif /* USE_QUIC */
} tune;
struct {
char *prefix; /* path prefix of unix bind socket */
@ -233,7 +245,6 @@ struct global {
* than 255 arguments
*/
/* 2-bytes hole */
int est_fd_usage; /* rough estimate of reserved FDs (listeners, pollers etc) */
int cfg_curr_line; /* line number currently being parsed */
const char *cfg_curr_file; /* config file currently being parsed or NULL */
char *cfg_curr_section; /* config section name currently being parsed or NULL */
@ -261,7 +272,6 @@ struct global {
unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
int last_checks;
uint32_t anon_key;
int maxthrpertgroup; /* Maximum number of threads per thread group */
/* leave this at the end to make sure we don't share this cache line by accident */
ALWAYS_ALIGN(64);

View File

@ -53,7 +53,6 @@ extern char *progname;
extern char **old_argv;
extern const char *old_unixsocket;
extern int daemon_fd[2];
extern int devnullfd;
struct proxy;
struct server;

View File

@ -1,15 +1,14 @@
#ifndef _HAPROXY_GUID_T_H
#define _HAPROXY_GUID_T_H
#include <import/cebtree.h>
#include <import/ebtree-t.h>
#include <haproxy/obj_type-t.h>
/* Maximum GUID size excluding final '\0' */
#define GUID_MAX_LEN 127
struct guid_node {
struct ceb_node node; /* attach point into GUID global tree */
char *key; /* the key itself */
struct ebpt_node node; /* attach point into GUID global tree */
enum obj_type *obj_type; /* pointer to GUID obj owner */
};

View File

@ -12,16 +12,7 @@ int guid_insert(enum obj_type *obj_type, const char *uid, char **errmsg);
void guid_remove(struct guid_node *guid);
struct guid_node *guid_lookup(const char *uid);
/* Returns the actual text key associated to <guid> node or NULL if not
* set
*/
static inline const char *guid_get(const struct guid_node *guid)
{
return guid->key;
}
int guid_is_valid_fmt(const char *uid, char **errmsg);
char *guid_name(const struct guid_node *guid);
int guid_count(void);
#endif /* _HAPROXY_GUID_H */

View File

@ -255,7 +255,6 @@ struct hlua_patref_iterator_context {
struct hlua_patref *ref;
struct bref bref; /* back-reference from the pat_ref_elt being accessed
* during listing */
struct pat_ref_gen *gen; /* the generation we are iterating over */
};
#else /* USE_LUA */

View File

@ -184,7 +184,6 @@ enum {
PERSIST_TYPE_NONE = 0, /* no persistence */
PERSIST_TYPE_FORCE, /* force-persist */
PERSIST_TYPE_IGNORE, /* ignore-persist */
PERSIST_TYPE_BE_SWITCH, /* force-be-switch */
};
/* final results for http-request rules */

View File

@ -177,7 +177,7 @@ static forceinline char *hsl_show_flags(char *buf, size_t len, const char *delim
#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
/* 0x00000008 unused */
#define HTX_FL_ALTERED_PAYLOAD 0x00000008 /* The payload is altered, the extra value must not be trusted */
#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
* (at worst, on the EOM block is missing)
*/
@ -265,12 +265,13 @@ struct htx {
uint32_t head_addr; /* start address of the free space at the beginning */
uint32_t end_addr; /* end address of the free space at the beginning */
uint64_t extra; /* known bytes amount remaining to receive */
uint32_t flags; /* HTX_FL_* */
/* XXX 4 bytes unused */
/* Blocks representing the HTTP message itself */
char blocks[VAR_ARRAY] ALIGNED(8);
char blocks[VAR_ARRAY] __attribute__((aligned(8)));
};
#endif /* _HAPROXY_HTX_T_H */

View File

@ -30,6 +30,11 @@
#include <haproxy/http-t.h>
#include <haproxy/htx-t.h>
/* ->extra field value when the payload length is unknown (non-chunked message
* with no "Content-length" header)
*/
#define HTX_UNKOWN_PAYLOAD_LENGTH ULLONG_MAX
extern struct htx htx_empty;
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t info);
@ -655,6 +660,7 @@ static inline void htx_reset(struct htx *htx)
htx->tail = htx->head = htx->first = -1;
htx->data = 0;
htx->tail_addr = htx->head_addr = htx->end_addr = 0;
htx->extra = 0;
htx->flags = HTX_FL_NONE;
}
@ -694,6 +700,8 @@ static inline struct htx *htxbuf(const struct buffer *buf)
htx->size = buf->size - sizeof(*htx);
htx_reset(htx);
}
if (htx->flags & HTX_FL_ALTERED_PAYLOAD)
htx->extra = 0;
return htx;
}
@ -829,10 +837,10 @@ static inline void htx_dump(struct buffer *chunk, const struct htx *htx, int ful
{
int32_t pos;
chunk_appendf(chunk, " htx=%p(size=%u,data=%u,used=%u,wrap=%s,flags=0x%08x,"
chunk_appendf(chunk, " htx=%p(size=%u,data=%u,used=%u,wrap=%s,flags=0x%08x,extra=%llu,"
"first=%d,head=%d,tail=%d,tail_addr=%d,head_addr=%d,end_addr=%d)",
htx, htx->size, htx->data, htx_nbblks(htx), (!htx->head_addr) ? "NO" : "YES",
htx->flags, htx->first, htx->head, htx->tail,
htx->flags, (unsigned long long)htx->extra, htx->first, htx->head, htx->tail,
htx->tail_addr, htx->head_addr, htx->end_addr);
if (!full || !htx_nbblks(htx))

View File

@ -14,7 +14,6 @@ extern struct list post_server_check_list;
extern struct list per_thread_alloc_list;
extern struct list per_thread_init_list;
extern struct list post_deinit_list;
extern struct list post_deinit_master_list;
extern struct list proxy_deinit_list;
extern struct list server_deinit_list;
extern struct list per_thread_free_list;
@ -25,7 +24,6 @@ void hap_register_post_check(int (*fct)());
void hap_register_post_proxy_check(int (*fct)(struct proxy *));
void hap_register_post_server_check(int (*fct)(struct server *));
void hap_register_post_deinit(void (*fct)());
void hap_register_post_deinit_master(void (*fct)());
void hap_register_proxy_deinit(void (*fct)(struct proxy *));
void hap_register_server_deinit(void (*fct)(struct server *));
@ -65,10 +63,6 @@ void hap_register_unittest(const char *name, int (*fct)(int, char **));
#define REGISTER_POST_DEINIT(fct) \
INITCALL1(STG_REGISTER, hap_register_post_deinit, (fct))
/* simplified way to declare a post-deinit (master process when launched in master/worker mode) callback in a file */
#define REGISTER_POST_DEINIT_MASTER(fct) \
INITCALL1(STG_REGISTER, hap_register_post_deinit_master, (fct))
/* simplified way to declare a proxy-deinit callback in a file */
#define REGISTER_PROXY_DEINIT(fct) \
INITCALL1(STG_REGISTER, hap_register_proxy_deinit, (fct))

View File

@ -67,7 +67,6 @@ enum init_stage {
STG_ALLOC, // allocate required structures
STG_POOL, // create pools
STG_INIT, // subsystems normal initialization
STG_INIT_2, // runs after step_init_2, to have global.nbthread
STG_SIZE // size of the stages array, must be last
};
@ -203,7 +202,6 @@ DECLARE_INIT_SECTION(STG_REGISTER);
DECLARE_INIT_SECTION(STG_ALLOC);
DECLARE_INIT_SECTION(STG_POOL);
DECLARE_INIT_SECTION(STG_INIT);
DECLARE_INIT_SECTION(STG_INIT_2);
// for use in the main haproxy.c file
#define DECLARE_INIT_STAGES asm("")

View File

@ -6,13 +6,13 @@
#include <haproxy/openssl-compat.h>
#include <haproxy/jwt-t.h>
size_t bn2base64url(const BIGNUM *bn, char *dst, size_t dsize);
size_t EVP_PKEY_to_pub_jwk(EVP_PKEY *pkey, char *dst, size_t dsize);
int bn2base64url(const BIGNUM *bn, char *dst, size_t dsize);
int EVP_PKEY_to_pub_jwk(EVP_PKEY *pkey, char *dst, size_t dsize);
enum jwt_alg EVP_PKEY_to_jws_alg(EVP_PKEY *pkey);
size_t jws_b64_payload(char *payload, char *dst, size_t dsize);
size_t jws_b64_protected(enum jwt_alg alg, char *kid, char *jwk, char *nonce, char *url, char *dst, size_t dsize);
size_t jws_b64_signature(EVP_PKEY *pkey, enum jwt_alg alg, char *b64protected, char *b64payload, char *dst, size_t dsize);
size_t jws_flattened(char *protected, char *payload, char *signature, char *dst, size_t dsize);
size_t jws_thumbprint(EVP_PKEY *pkey, char *dst, size_t dsize);
int jws_b64_payload(char *payload, char *dst, size_t dsize);
int jws_b64_protected(enum jwt_alg alg, char *kid, char *jwk, char *nonce, char *url, char *dst, size_t dsize);
int jws_b64_signature(EVP_PKEY *pkey, enum jwt_alg alg, char *b64protected, char *b64payload, char *dst, size_t dsize);
int jws_flattened(char *protected, char *payload, char *signature, char *dst, size_t dsize);
int jws_thumbprint(EVP_PKEY *pkey, char *dst, size_t dsize);
#endif /* ! _HAPROXY_JWK_H_ */

View File

@ -55,7 +55,6 @@ struct jwt_ctx {
struct jwt_item signature;
char *key;
unsigned int key_length;
int is_x509; /* 1 if 'key' field is a certificate, 0 otherwise */
};
enum jwt_elt {
@ -65,8 +64,17 @@ enum jwt_elt {
JWT_ELT_MAX
};
enum jwt_entry_type {
JWT_ENTRY_DFLT,
JWT_ENTRY_STORE,
JWT_ENTRY_PKEY,
JWT_ENTRY_INVALID, /* already tried looking into ckch_store tree (unsuccessful) */
};
struct jwt_cert_tree_entry {
EVP_PKEY *pubkey;
struct ckch_store *ckch_store;
int type; /* jwt_entry_type */
struct ebmb_node node;
char path[VAR_ARRAY];
};
@ -80,8 +88,7 @@ enum jwt_vrfy_status {
JWT_VRFY_INVALID_TOKEN = -3,
JWT_VRFY_OUT_OF_MEMORY = -4,
JWT_VRFY_UNKNOWN_CERT = -5,
JWT_VRFY_INTERNAL_ERR = -6,
JWT_VRFY_UNAVAIL_CERT = -7,
JWT_VRFY_INTERNAL_ERR = -6
};
#endif /* USE_OPENSSL */

View File

@ -28,10 +28,12 @@
#ifdef USE_OPENSSL
enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len);
int jwt_tokenize(const struct buffer *jwt, struct jwt_item *items, unsigned int *item_num);
int jwt_tree_load_cert(char *path, int pathlen, int tryload_cert, const char *file, int line, char **err);
int jwt_tree_load_cert(char *path, int pathlen, const char *file, int line, char **err);
enum jwt_vrfy_status jwt_verify(const struct buffer *token, const struct buffer *alg,
const struct buffer *key, int is_x509);
const struct buffer *key);
void jwt_replace_ckch_store(struct ckch_store *old_ckchs, struct ckch_store *new_ckchs);
#endif /* USE_OPENSSL */

View File

@ -140,7 +140,6 @@ struct ssl_bind_conf {
unsigned int verify:3; /* verify method (set of SSL_VERIFY_* flags) */
unsigned int no_ca_names:1;/* do not send ca names to clients (ca_file related) */
unsigned int early_data:1; /* early data allowed */
unsigned int ktls:1; /* use kTLS if available */
char *ca_file; /* CAfile to use on verify and ca-names */
char *ca_verify_file; /* CAverify file to use on verify only */
char *crl_file; /* CRLfile to use on verify */
@ -152,9 +151,6 @@ struct ssl_bind_conf {
char *client_sigalgs; /* Client Signature algorithms */
struct tls_version_filter ssl_methods_cfg; /* original ssl methods found in configuration */
struct tls_version_filter ssl_methods; /* actual ssl methods used at runtime */
#ifdef USE_ECH
char *ech_filedir; /* ECH config, file/directory name */
#endif
#endif
};
@ -186,7 +182,7 @@ struct bind_conf {
#endif
#ifdef USE_QUIC
struct quic_transport_params quic_params; /* QUIC transport parameters. */
const struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
size_t max_cwnd; /* QUIC maximumu congestion control window size (kB) */
enum quic_sock_mode quic_mode; /* QUIC socket allocation strategy */
#endif
@ -198,13 +194,11 @@ struct bind_conf {
int maxseg; /* for TCP, advertised MSS */
int tcp_ut; /* for TCP, user timeout */
char *tcp_md5sig; /* TCP MD5 signature password (RFC2385) */
char *cc_algo; /* TCP congestion control algorithm ("cc" parameter) */
int idle_ping; /* MUX idle-ping interval in ms */
int maxaccept; /* if set, max number of connections accepted at once (-1 when disabled) */
unsigned int backlog; /* if set, listen backlog */
int maxconn; /* maximum connections allowed on this listener */
int (*accept)(struct connection *conn); /* upper layer's accept() */
int tcp_ss; /* for TCP, Save SYN */
int level; /* stats access level (ACCESS_LVL_*) */
int severity_output; /* default severity output format in cli feedback messages */
short int nice; /* nice value to assign to the instantiated tasks */
@ -243,7 +237,7 @@ struct listener {
enum obj_type obj_type; /* object type = OBJ_TYPE_LISTENER */
enum li_state state; /* state: NEW, INIT, ASSIGNED, LISTEN, READY, FULL */
uint16_t flags; /* listener flags: LI_F_* */
int luid; /* listener universally unique ID, used for SNMP, indexed by <luid_node> below */
int luid; /* listener universally unique ID, used for SNMP */
int nbconn; /* current number of connections on this listener */
unsigned long thr_idx; /* thread indexes for queue distribution (see listener_accept()) */
__decl_thread(HA_RWLOCK_T lock);
@ -258,7 +252,10 @@ struct listener {
struct list by_bind; /* chaining in bind_conf's list of listeners */
struct bind_conf *bind_conf; /* "bind" line settings, include SSL settings among other things */
struct receiver rx; /* network receiver parts */
struct ceb_node luid_node; /* place in the tree of used IDs, indexes <luid> above */
struct {
struct eb32_node id; /* place in the tree of used IDs */
} conf; /* config information */
struct guid_node guid; /* GUID global tree node */
struct li_per_thread *per_thr; /* per-thread fields (one per thread in the group) */
@ -310,7 +307,7 @@ struct bind_kw_list {
struct accept_queue_ring {
uint32_t idx; /* (head << 16) | tail */
struct tasklet *tasklet; /* tasklet of the thread owning this ring */
struct connection *entry[ACCEPT_QUEUE_SIZE] THREAD_ALIGNED();
struct connection *entry[ACCEPT_QUEUE_SIZE] __attribute((aligned(64)));
};

View File

@ -25,11 +25,8 @@
#include <stdlib.h>
#include <string.h>
#include <import/ceb32_tree.h>
#include <haproxy/api.h>
#include <haproxy/listener-t.h>
#include <haproxy/proxy-t.h>
struct proxy;
struct task;
@ -85,12 +82,6 @@ int relax_listener(struct listener *l, int lpx, int lli);
*/
void stop_listener(struct listener *l, int lpx, int lpr, int lli);
/* This function returns the first unused listener ID greater than or equal to
* <from> in the proxy <px>. Zero is returned if no spare one is found (should
* never happen).
*/
uint listener_get_next_id(const struct proxy *px, uint from);
/* This function adds the specified listener's file descriptor to the polling
* lists if it is in the LI_LISTEN state. The listener enters LI_READY or
* LI_FULL state depending on its number of connections. In daemon mode, we
@ -231,7 +222,7 @@ const char *listener_state_str(const struct listener *l);
struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED();
extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64)));
extern const char* li_status_st[LI_STATE_COUNT];
enum li_status get_li_status(struct listener *l);
@ -239,12 +230,6 @@ enum li_status get_li_status(struct listener *l);
/* number of times an accepted connection resulted in maxconn being reached */
extern ullong maxconn_reached;
/* index listener <li>'s id into proxy <px>'s used_listener_id */
static inline void listener_index_id(struct proxy *px, struct listener *li)
{
ceb32_item_insert(&px->conf.used_listener_id, luid_node, luid, li);
}
static inline uint accept_queue_ring_len(const struct accept_queue_ring *ring)
{
uint idx, head, tail, len;
@ -258,12 +243,6 @@ static inline uint accept_queue_ring_len(const struct accept_queue_ring *ring)
return len;
}
/* Returns a pointer to the first bind_conf matching either name <name>, or
* filename:linenum in <name> if <name> begins with a '@'. NULL is returned if
* no match is found.
*/
struct bind_conf *bind_conf_find_by_name(struct proxy *front, const char *name);
#endif /* _HAPROXY_LISTENER_H */
/*

View File

@ -335,13 +335,6 @@ struct log_profile {
struct eb_root extra; // extra log profile steps (if any)
};
/* add additional bitmasks in this struct if needed but don't
* forget to update px_parse_log_steps() and log_orig_proxy() accordingly
*/
struct log_steps {
uint64_t steps_1; // first 64 steps
};
#endif /* _HAPROXY_LOG_T_H */
/*

View File

@ -62,7 +62,6 @@
#define H2_CF_RCVD_SHUT 0x00020000 // a recv() attempt already failed on a shutdown
#define H2_CF_END_REACHED 0x00040000 // pending data too short with RCVD_SHUT present
#define H2_CF_SETTINGS_NEEDED 0x00080000 // can't proceed without knowing settings (frontend or extensions)
#define H2_CF_RCVD_RFC8441 0x00100000 // settings from RFC8441 has been received indicating support for Extended CONNECT
#define H2_CF_SHTS_UPDATED 0x00200000 // SETTINGS_HEADER_TABLE_SIZE updated
#define H2_CF_DTSU_EMITTED 0x00400000 // HPACK Dynamic Table Size Update opcode emitted

View File

@ -41,7 +41,6 @@ struct qcc {
struct connection *conn;
uint64_t nb_sc; /* number of attached stream connectors */
uint64_t nb_hreq; /* number of in-progress http requests */
uint64_t tot_sc; /* total number of stream connectors seen since conn init */
uint32_t flags; /* QC_CF_* */
enum qcc_app_st app_st; /* application layer state */
int glitches; /* total number of glitches on this connection */

View File

@ -1,32 +0,0 @@
#ifndef _HAPROXY_NCBMBUF_T_H
#define _HAPROXY_NCBMBUF_T_H
#include <haproxy/ncbuf_common-t.h>
/* Non-contiguous bitmap buffer
*
* This module is an alternative implementation to ncbuf type. Its main
* difference is that filled blocks and gaps are encoded via a bitmap.
*
* The main advantage of the bitmap is that contrary to ncbuf type there is no
* limitation on the minimal size of gaps. Thus, operation such as add and
* advance are guaranteed to succeed.
*
* Storage is reserved for the bitmap at the end of the buffer area,
* representing roughly 1/9 of the total space. Thus, usable buffer storage is
* smaller than the default ncbuf type.
*/
#define NCBMBUF_NULL ((struct ncbmbuf){ })
struct ncbmbuf {
char *area; /* allocated area used for both data and bitmap storage */
unsigned char *bitmap; /* bitmap storage located at the end of allocated area */
ncb_sz_t size; /* size usable for data storage */
ncb_sz_t size_bm; /* size of bitmap storage */
ncb_sz_t head;
};
#endif /* _HAPROXY_NCBMBUF_T_H */

View File

@ -1,51 +0,0 @@
#ifndef _HAPROXY_NCBMBUF_H
#define _HAPROXY_NCBMBUF_H
#include <haproxy/ncbmbuf-t.h>
static inline int ncbmb_is_null(const struct ncbmbuf *buf)
{
return buf->size == 0;
}
void ncbmb_init(struct ncbmbuf *buf, ncb_sz_t head);
struct ncbmbuf ncbmb_make(char *area, ncb_sz_t size, ncb_sz_t head);
/* Returns start of allocated buffer area. */
static inline char *ncbmb_orig(const struct ncbmbuf *buf)
{
return buf->area;
}
/* Returns current head pointer into buffer area. */
static inline char *ncbmb_head(const struct ncbmbuf *buf)
{
return buf->area + buf->head;
}
/* Returns the first byte after the allocated buffer area. */
static inline char *ncbmb_wrap(const struct ncbmbuf *buf)
{
return buf->area + buf->size;
}
/* Returns the usable size of <buf> for data storage. This is the size of the
* allocated buffer without the bitmap space.
*/
static inline ncb_sz_t ncbmb_size(const struct ncbmbuf *buf)
{
if (ncbmb_is_null(buf))
return 0;
return buf->size;
}
int ncbmb_is_empty(const struct ncbmbuf *buf);
ncb_sz_t ncbmb_data(const struct ncbmbuf *buf, ncb_sz_t offset);
enum ncb_ret ncbmb_add(struct ncbmbuf *buf, ncb_sz_t off,
const char *data, ncb_sz_t len, enum ncb_add_mode mode);
enum ncb_ret ncbmb_advance(struct ncbmbuf *buf, ncb_sz_t adv);
#endif /* _HAPROXY_NCBMBUF_H */

View File

@ -1,8 +1,6 @@
#ifndef _HAPROXY_NCBUF_T_H
#define _HAPROXY_NCBUF_T_H
#include <haproxy/ncbuf_common-t.h>
/* **** public documentation ****
*
* <ncbuf> stands for non-contiguous circular buffer. This type can be used to
@ -64,6 +62,15 @@
*
*/
#include <inttypes.h>
/* ncb_sz_t is the basic type used in ncbuf to represent data and gap sizes.
* Use a bigger type to extend the maximum data size supported in the buffer.
* On the other hand, this also increases the minimal gap size which can
* cause more rejection for add/delete operations.
*/
typedef uint32_t ncb_sz_t;
/* reserved size before head used to store first data block size */
#define NCB_RESERVED_SZ (sizeof(ncb_sz_t))
@ -80,4 +87,18 @@ struct ncbuf {
ncb_sz_t head;
};
enum ncb_ret {
NCB_RET_OK = 0, /* no error */
NCB_RET_GAP_SIZE, /* operation would create a too small gap */
NCB_RET_DATA_REJ, /* operation would overwrite data with different one */
};
/* Define how insert is conducted in regards with already stored data. */
enum ncb_add_mode {
NCB_ADD_PRESERVE, /* keep the already stored data and only insert in gaps */
NCB_ADD_OVERWRT, /* overwrite old data with new ones */
NCB_ADD_COMPARE, /* compare before insert : if new data are different do not proceed */
};
#endif /* _HAPROXY_NCBUF_T_H */

View File

@ -1,27 +0,0 @@
#ifndef _HAPROXY_NCBUF_COMMON_T_H
#define _HAPROXY_NCBUF_COMMON_T_H
#include <inttypes.h>
/* ncb_sz_t is the basic type used in ncbuf to represent data and gap sizes.
* Use a bigger type to extend the maximum data size supported in the buffer.
* On the other hand, this also increases the minimal gap size which can
* cause more rejection for add/delete operations.
*/
typedef uint32_t ncb_sz_t;
enum ncb_ret {
NCB_RET_OK = 0, /* no error */
NCB_RET_GAP_SIZE, /* operation would create a too small gap */
NCB_RET_DATA_REJ, /* operation would overwrite data with different one */
};
/* Define how insert is conducted in regards with already stored data. */
enum ncb_add_mode {
NCB_ADD_PRESERVE, /* keep the already stored data and only insert in gaps */
NCB_ADD_OVERWRT, /* overwrite old data with new ones */
NCB_ADD_COMPARE, /* compare before insert : if new data are different do not proceed */
};
#endif /* _HAPROXY_NCBUF_COMMON_T_H */

View File

@ -77,8 +77,7 @@ enum ssl_encryption_level_t {
#if defined(OPENSSL_IS_AWSLC)
#define OPENSSL_NO_DH
#define SSL_CTX_set1_sigalgs_list SSL_CTX_set1_sigalgs_list
#define SSL_set_quic_early_data_enabled SSL_set_early_data_enabled
#define SSL_CTX_set1_sigalgs_list SSL_CTX_set1_sigalgs_list
#endif
@ -129,11 +128,6 @@ enum ssl_encryption_level_t {
#define HAVE_CRYPTO_memcmp
#endif
#if !defined(USE_OPENSSL_WOLFSSL) && !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) && !defined(LIBRESSL_VERSION_NUMBER)
/* Defined if our SSL lib is really OpenSSL */
#define HAVE_VANILLA_OPENSSL
#endif
#if (defined(SN_ct_cert_scts) && !defined(OPENSSL_NO_TLSEXT))
#define HAVE_SSL_SCTL
#endif
@ -554,36 +548,5 @@ static inline unsigned long ERR_peek_error_func(const char **func)
*(cb) = (void (*) (void))ctx->tlsext_status_cb
#endif
#ifdef USE_KTLS
#ifdef __linux__
#include <linux/tls.h>
#endif
#if defined(HAVE_VANILLA_OPENSSL) && (OPENSSL_VERSION_NUMBER >= 0x3000000fL)
#define HA_USE_KTLS
/*
* Only provided by internal/bio.h, but we need it
*/
#ifndef BIO_CTRL_SET_KTLS
#define BIO_CTRL_SET_KTLS 72
#endif
#ifndef BIO_CTRL_SET_KTLS_TX_SEND_CTRL_MSG
#define BIO_CTRL_SET_KTLS_TX_SEND_CTRL_MSG 74
#endif
#ifndef BIO_CTRL_CLEAR_KTLS_TX_CTRL_MSG
#define BIO_CTRL_CLEAR_KTLS_TX_CTRL_MSG 75
#endif
#endif /* HAVE_VANILLA_OPENSSL && OPENSSL_VERSION_NUMBER >= 0x3000000fL */
#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
#include <openssl/hkdf.h>
#define HA_USE_KTLS
#endif /* OPENSSL_IS_BORINGSSL || OPENSSL_IS_AWSLC */
#endif /* USE_KTLS */
#endif /* USE_OPENSSL */
#endif /* _HAPROXY_OPENSSL_COMPAT_H */

View File

@ -22,7 +22,6 @@
#ifndef _HAPROXY_PATTERN_T_H
#define _HAPROXY_PATTERN_T_H
#include <import/cebtree.h>
#include <import/ebtree-t.h>
#include <haproxy/api-t.h>
@ -107,34 +106,20 @@ struct pat_ref {
struct list list; /* Used to chain refs. */
char *reference; /* The reference name. */
char *display; /* String displayed to identify the pattern origin. */
struct ceb_root *gen_root; /* The tree mapping generation IDs to pattern reference elements */
struct list head; /* The head of the list of struct pat_ref_elt. */
struct eb_root ebmb_root; /* The tree where pattern reference elements are attached. */
struct list pat; /* The head of the list of struct pattern_expr. */
unsigned int flags; /* flags PAT_REF_*. */
unsigned int curr_gen; /* current generation number (anything below can be removed) */
unsigned int next_gen; /* next generation number (insertions use this one) */
/* We keep a cached pointer to the current generation for performance. */
struct {
struct pat_ref_gen *data;
unsigned int id;
} cached_gen;
int unique_id; /* Each pattern reference have unique id. */
unsigned long long revision; /* updated for each update */
unsigned long long entry_cnt; /* the total number of entries */
THREAD_ALIGN();
THREAD_ALIGN(64);
__decl_thread(HA_RWLOCK_T lock); /* Lock used to protect pat ref elements */
event_hdl_sub_list e_subs; /* event_hdl: pat_ref's subscribers list (atomically updated) */
};
/* This struct represents all the elements in a pattern reference generation. The tree
* is used most of the time, but we also maintain a list for when order matters.
*/
struct pat_ref_gen {
struct list head; /* The head of the list of struct pat_ref_elt. */
struct ceb_root *elt_root; /* The tree where pattern reference elements are attached. */
struct ceb_node gen_node; /* Linkage for the gen_root cebtree in struct pat_ref */
unsigned int gen_id;
};
/* This is a part of struct pat_ref. Each entry contains one pattern and one
* associated value as original string. All derivative forms (via exprs) are
* accessed from list_head or tree_head. Be careful, it's variable-sized!
@ -147,7 +132,7 @@ struct pat_ref_elt {
char *sample;
unsigned int gen_id; /* generation of pat_ref this was made for */
int line;
struct ceb_node node; /* Node to attach this element to its <pat_ref_gen> cebtree. */
struct ebmb_node node; /* Node to attach this element to its <pat_ref> ebtree. */
const char pattern[0]; // const only to make sure nobody tries to free it.
};

View File

@ -189,10 +189,8 @@ struct pat_ref *pat_ref_new(const char *reference, const char *display, unsigned
struct pat_ref *pat_ref_newid(int unique_id, const char *display, unsigned int flags);
struct pat_ref_elt *pat_ref_find_elt(struct pat_ref *ref, const char *key);
struct pat_ref_elt *pat_ref_gen_find_elt(struct pat_ref *ref, unsigned int gen_id, const char *key);
struct pat_ref_elt *pat_ref_append(struct pat_ref *ref, unsigned int gen, const char *pattern, const char *sample, int line);
struct pat_ref_elt *pat_ref_append(struct pat_ref *ref, const char *pattern, const char *sample, int line);
struct pat_ref_elt *pat_ref_load(struct pat_ref *ref, unsigned int gen, const char *pattern, const char *sample, int line, char **err);
struct pat_ref_gen *pat_ref_gen_new(struct pat_ref *ref, unsigned int gen_id);
struct pat_ref_gen *pat_ref_gen_get(struct pat_ref *ref, unsigned int gen_id);
int pat_ref_push(struct pat_ref_elt *elt, struct pattern_expr *expr, int patflags, char **err);
int pat_ref_add(struct pat_ref *ref, const char *pattern, const char *sample, char **err);
int pat_ref_set(struct pat_ref *ref, const char *pattern, const char *sample, char **err);

View File

@ -86,7 +86,7 @@ static inline void *pool_alloc_area_uaf(size_t size, size_t align)
*/
static inline void pool_free_area_uaf(void *area, size_t size)
{
size_t pad = (uintptr_t)area & 4095;
size_t pad = (4096 - size) & 0xFF0;
/* This object will be released for real in order to detect a use after
* free. We also force a write to the area to ensure we crash on double
@ -97,8 +97,7 @@ static inline void pool_free_area_uaf(void *area, size_t size)
if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
ABORT_NOW();
/* better know immediately if an address calculation was wrong! */
BUG_ON(munmap(area - pad, (size + 4095) & -4096) == -1);
munmap(area - pad, (size + 4095) & -4096);
}
#endif /* _HAPROXY_POOL_OS_H */

View File

@ -63,7 +63,7 @@ struct pool_cache_head {
unsigned int tid; /* thread id, for debugging only */
struct pool_head *pool; /* assigned pool, for debugging only */
ulong fill_pattern; /* pattern used to fill the area on free */
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
/* This describes a pool registration, which is what was passed to
* create_pool() and that might have been merged with an existing pool.
@ -75,7 +75,6 @@ struct pool_registration {
unsigned int line; /* line in the file where the pool is declared, 0 if none */
unsigned int size; /* expected object size */
unsigned int flags; /* MEM_F_* */
unsigned int type_align; /* type-imposed alignment; 0=unspecified */
unsigned int align; /* expected alignment; 0=unspecified */
};
@ -139,7 +138,7 @@ struct pool_head {
struct list regs; /* registrations: alt names for this pool */
/* heavily read-write part */
THREAD_ALIGN();
THREAD_ALIGN(64);
/* these entries depend on the pointer value, they're used to reduce
* the contention on fast-changing values. The alignment here is
@ -148,7 +147,7 @@ struct pool_head {
* just meant to shard elements and there are no per-free_list stats.
*/
struct {
THREAD_ALIGN();
THREAD_ALIGN(64);
struct pool_item *free_list; /* list of free shared objects */
unsigned int allocated; /* how many chunks have been allocated */
unsigned int used; /* how many chunks are currently in use */
@ -156,8 +155,8 @@ struct pool_head {
unsigned int failed; /* failed allocations (indexed by hash of TID) */
} buckets[CONFIG_HAP_POOL_BUCKETS];
struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(); /* pool caches */
} THREAD_ALIGNED();
struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
} __attribute__((aligned(64)));
#endif /* _HAPROXY_POOL_T_H */

View File

@ -33,14 +33,13 @@
/* This creates a pool_reg registers a call to create_pool_callback(ptr) with it.
* Do not use this one, use REGISTER_POOL() instead.
*/
#define __REGISTER_POOL(_line, _ptr, _name, _size, _type_align, _align) \
#define __REGISTER_POOL(_line, _ptr, _name, _size, _align) \
static struct pool_registration __pool_reg_##_line = { \
.name = _name, \
.file = __FILE__, \
.line = __LINE__, \
.size = _size, \
.flags = MEM_F_STATREG, \
.type_align = _type_align, \
.align = _align, \
}; \
INITCALL3(STG_POOL, create_pool_callback, (_ptr), (_name), &__pool_reg_##_line);
@ -48,62 +47,54 @@
/* intermediary level for line number resolution, do not use this one, use
* REGISTER_POOL() instead.
*/
#define _REGISTER_POOL(line, ptr, name, size, align, type_align) \
__REGISTER_POOL(line, ptr, name, size, align, type_align)
#define _REGISTER_POOL(line, ptr, name, size, align) \
__REGISTER_POOL(line, ptr, name, size, align)
/* This registers a call to create_pool_callback(ptr) with these args */
#define REGISTER_POOL(ptr, name, size) \
_REGISTER_POOL(__LINE__, ptr, name, size, 0, 0)
_REGISTER_POOL(__LINE__, ptr, name, size, 0)
/* This macro declares a pool head <ptr> and registers its creation */
#define DECLARE_POOL(ptr, name, size) \
struct pool_head *(ptr) __read_mostly = NULL; \
_REGISTER_POOL(__LINE__, &ptr, name, size, 0, 0)
_REGISTER_POOL(__LINE__, &ptr, name, size, 0)
/* This macro declares a static pool head <ptr> and registers its creation */
#define DECLARE_STATIC_POOL(ptr, name, size) \
static struct pool_head *(ptr) __read_mostly; \
_REGISTER_POOL(__LINE__, &ptr, name, size, 0, 0)
_REGISTER_POOL(__LINE__, &ptr, name, size, 0)
/*** below are the aligned pool macros, taking one extra arg for alignment ***/
/* This registers a call to create_pool_callback(ptr) with these args */
#define REGISTER_ALIGNED_POOL(ptr, name, size, align) \
_REGISTER_POOL(__LINE__, ptr, name, size, 0, align)
_REGISTER_POOL(__LINE__, ptr, name, size, align)
/* This macro declares an aligned pool head <ptr> and registers its creation */
#define DECLARE_ALIGNED_POOL(ptr, name, size, align) \
struct pool_head *(ptr) __read_mostly = NULL; \
_REGISTER_POOL(__LINE__, &ptr, name, size, 0, align)
_REGISTER_POOL(__LINE__, &ptr, name, size, align)
/* This macro declares a static aligned pool head <ptr> and registers its creation */
#define DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align) \
static struct pool_head *(ptr) __read_mostly; \
_REGISTER_POOL(__LINE__, &ptr, name, size, 0, align)
_REGISTER_POOL(__LINE__, &ptr, name, size, align)
/*** below are the typed pool macros, taking a type and an extra size ***/
/* This is only used by REGISTER_TYPED_POOL below */
#define _REGISTER_TYPED_POOL(ptr, name, type, extra, align, ...) \
_REGISTER_POOL(__LINE__, ptr, name, sizeof(type) + extra, __alignof__(type), align)
/* This registers a call to create_pool_callback(ptr) with these args.
* It supports two optional args:
* - extra: the extra size to be allocated at the end of the type. Def: 0.
* - align: the desired alignment on the type. Def: 0 = same as type.
*/
#define REGISTER_TYPED_POOL(ptr, name, type, args...) \
_REGISTER_TYPED_POOL(ptr, name, type, ##args, 0, 0)
/* This registers a call to create_pool_callback(ptr) with these args */
#define REGISTER_TYPED_POOL(ptr, name, type, extra) \
_REGISTER_POOL(__LINE__, ptr, name, sizeof(type) + extra, __alignof__(type))
/* This macro declares an aligned pool head <ptr> and registers its creation */
#define DECLARE_TYPED_POOL(ptr, name, type, args...) \
#define DECLARE_TYPED_POOL(ptr, name, type, extra) \
struct pool_head *(ptr) __read_mostly = NULL; \
_REGISTER_TYPED_POOL(&ptr, name, type, ##args, 0, 0)
_REGISTER_POOL(__LINE__, &ptr, name, sizeof(type) + extra, __alignof__(type))
/* This macro declares a static aligned pool head <ptr> and registers its creation */
#define DECLARE_STATIC_TYPED_POOL(ptr, name, type, args...) \
#define DECLARE_STATIC_TYPED_POOL(ptr, name, type, extra) \
static struct pool_head *(ptr) __read_mostly; \
_REGISTER_TYPED_POOL(&ptr, name, type, ##args, 0, 0)
_REGISTER_POOL(__LINE__, &ptr, name, sizeof(type) + extra, __alignof__(type))
/* By default, free objects are linked by a pointer stored at the beginning of
* the memory area. When DEBUG_MEMORY_POOLS is set, the allocated area is
@ -177,6 +168,7 @@ void *pool_get_from_os_noinc(struct pool_head *pool);
void pool_put_to_os_nodec(struct pool_head *pool, void *ptr);
void *pool_alloc_nocache(struct pool_head *pool, const void *caller);
void pool_free_nocache(struct pool_head *pool, void *ptr);
void dump_pools(void);
int pool_parse_debugging(const char *str, char **err);
int pool_total_failures(void);
unsigned long long pool_total_allocated(void);

View File

@ -160,7 +160,6 @@ struct protocol {
/* default I/O handler */
void (*default_iocb)(int fd); /* generic I/O handler (typically accept callback) */
int (*get_info)(struct connection *conn, long long int *info, int info_num); /* Callback to get connection level statistical counters */
int (*get_opt)(const struct connection *conn, int level, int optname, void *buf, int size); /* getsockopt(level:optname) into buf:size */
uint flags; /* flags describing protocol support (PROTO_F_*) */
uint nb_receivers; /* number of receivers (under proto_lock) */

View File

@ -247,7 +247,6 @@ enum PR_SRV_STATE_FILE {
#define PR_FL_IMPLICIT_REF 0x10 /* The default proxy is implicitly referenced by another proxy */
#define PR_FL_PAUSED 0x20 /* The proxy was paused at run time (reversible) */
#define PR_FL_CHECKED 0x40 /* The proxy configuration was fully checked (including postparsing checks) */
#define PR_FL_BE_UNPUBLISHED 0x80 /* The proxy cannot be targetted by content switching rules */
struct stream;
@ -305,14 +304,13 @@ struct error_snapshot {
struct proxy_per_tgroup {
struct queue queue;
struct lbprm_per_tgrp lbprm;
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
struct proxy {
enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
char flags; /* bit field PR_FL_* */
enum pr_mode mode; /* mode = PR_MODE_TCP, PR_MODE_HTTP, ... */
char cap; /* supported capabilities (PR_CAP_*) */
int to_log; /* things to be logged (LW_*), special value LW_LOGSTEPS == follow log-steps */
unsigned long last_change; /* internal use only: last time the proxy state was changed */
struct list global_list; /* list member for global proxy list */
@ -357,8 +355,7 @@ struct proxy {
struct server *srv, *defsrv; /* known servers; default server configuration */
struct lbprm lbprm; /* load-balancing parameters */
int srv_act, srv_bck; /* # of servers eligible for LB (UP|!checked) AND (enabled+weight!=0) */
int load_server_state_from_file; /* location of the file containing server state.
* flag PR_SRV_STATE_FILE_* */
int served; /* # of active sessions currently being served */
int cookie_len; /* strlen(cookie_name), computed only once */
struct server *ready_srv; /* a server being ready to serve requests */
char *cookie_domain; /* domain used to insert the cookie */
@ -381,7 +378,6 @@ struct proxy {
int srvtcpka_cnt; /* The maximum number of keepalive probes TCP should send before dropping the connection. (server side) */
int srvtcpka_idle; /* The time (in seconds) the connection needs to remain idle before TCP starts sending keepalive probes. (server side) */
int srvtcpka_intvl; /* The time (in seconds) between individual keepalive probes. (server side) */
unsigned int tot_fe_maxconn; /* #maxconn of frontends linked to that backend, it is used to compute fullconn */
struct ist monitor_uri; /* a special URI to which we respond with HTTP/200 OK */
struct list mon_fail_cond; /* list of conditions to fail monitoring requests (chained) */
struct { /* WARNING! check proxy_reset_timeouts() in proxy.h !!! */
@ -400,11 +396,14 @@ struct proxy {
} timeout;
__decl_thread(HA_RWLOCK_T lock); /* may be taken under the server's lock */
char *id; /* proxy id (name), indexed by <conf.name_node> below */
char *desc; /* proxy description */
char *id, *desc; /* proxy id (name) and description */
struct proxy_per_tgroup *per_tgrp; /* array of per-tgroup stuff such as queues */
unsigned int queueslength; /* Sum of the length of each queue */
int totpend; /* total number of pending connections on this instance (for stats) */
unsigned int feconn, beconn; /* # of active frontend and backends streams */
unsigned int fe_sps_lim; /* limit on new sessions per second on the frontend */
unsigned int fullconn; /* #conns on backend above which servers are used at full load */
unsigned int tot_fe_maxconn; /* #maxconn of frontends linked to that backend, it is used to compute fullconn */
struct ist server_id_hdr_name; /* the header to use to send the server id (name) */
int conn_retries; /* maximum number of connect retries */
unsigned int retry_type; /* Type of retry allowed */
@ -423,6 +422,7 @@ struct proxy {
struct buffer log_tag; /* override default syslog tag */
struct ist header_unique_id; /* unique-id header */
struct lf_expr format_unique_id; /* unique-id format */
int to_log; /* things to be logged (LW_*), special value LW_LOGSTEPS == follow log-steps */
int nb_req_cap, nb_rsp_cap; /* # of headers to be captured */
struct cap_hdr *req_cap; /* chained list of request headers to be captured */
struct cap_hdr *rsp_cap; /* chained list of response headers to be captured */
@ -440,7 +440,7 @@ struct proxy {
char *check_path; /* PATH environment to use for external agent checks */
struct http_reply *replies[HTTP_ERR_SIZE]; /* HTTP replies for known errors */
unsigned int log_count; /* number of logs produced by the frontend */
int uuid; /* universally unique proxy ID, used for SNMP, indexed by conf.uuid_node below */
int uuid; /* universally unique proxy ID, used for SNMP */
unsigned int backlog; /* force the frontend's listen backlog */
unsigned int li_all; /* total number of listeners attached to this proxy */
unsigned int li_paused; /* total number of listeners paused (LI_PAUSED) */
@ -459,24 +459,24 @@ struct proxy {
struct {
const char *file; /* file where the section appears */
struct ceb_node uuid_node; /* place in the tree of used IDs, indexes <uuid> above */
struct eb32_node id; /* place in the tree of used IDs */
int line; /* line where the section appears */
struct ceb_root *used_listener_id; /* list of listener IDs in use */
struct ceb_root *used_server_id; /* list of server IDs in use */
struct ceb_root *used_server_name; /* list of server names in use */
struct eb_root used_listener_id;/* list of listener IDs in use */
struct eb_root used_server_id; /* list of server IDs in use */
struct eb_root used_server_name; /* list of server names in use */
struct list bind; /* list of bind settings */
struct list listeners; /* list of listeners belonging to this frontend */
struct list errors; /* list of all custom error files */
struct arg_list args; /* sample arg list that need to be resolved */
struct ceb_node name_node; /* proxies are stored sorted by name here; indexes <id> below */
struct ebpt_node by_name; /* proxies are stored sorted by name here */
struct list lf_checks; /* list of logformats found in the proxy section that needs to be checked during postparse */
struct log_steps log_steps; /* bitfield of log origins where log should be generated during request handling */
struct eb_root log_steps; /* tree of log origins where log should be generated during request handling */
const char *file_prev; /* file of the previous instance found with the same name, or NULL */
int line_prev; /* line of the previous instance found with the same name, or 0 */
unsigned int refcount; /* refcount on this proxy (only used for default proxy for now) */
} conf; /* config information */
struct http_ext *http_ext; /* http ext options */
struct ceb_root *used_server_addr; /* list of server addresses in use */
struct eb_root used_server_addr; /* list of server addresses in use */
void *parent; /* parent of the proxy when applicable */
struct comp *comp; /* http compression */
@ -494,6 +494,8 @@ struct proxy {
struct email_alertq *queues; /* per-mailer alerts queues */
} email_alert;
int load_server_state_from_file; /* location of the file containing server state.
* flag PR_SRV_STATE_FILE_* */
char *server_state_file_name; /* used when load_server_state_from_file is set to
* PR_SRV_STATE_FILE_LOCAL. Give a specific file name for
* this backend. If not specified or void, then the backend
@ -505,12 +507,6 @@ struct proxy {
EXTRA_COUNTERS(extra_counters_fe);
EXTRA_COUNTERS(extra_counters_be);
THREAD_ALIGN();
unsigned int queueslength; /* Sum of the length of each queue */
int served; /* # of active sessions currently being served */
int totpend; /* total number of pending connections on this instance (for stats) */
unsigned int feconn, beconn; /* # of active frontend and backends streams */
};
struct switching_rule {

View File

@ -22,8 +22,6 @@
#ifndef _HAPROXY_PROXY_H
#define _HAPROXY_PROXY_H
#include <import/ceb32_tree.h>
#include <haproxy/api.h>
#include <haproxy/applet-t.h>
#include <haproxy/freq_ctr.h>
@ -36,9 +34,9 @@
extern struct proxy *proxies_list;
extern struct list proxies;
extern struct ceb_root *used_proxy_id; /* list of proxy IDs in use */
extern struct eb_root used_proxy_id; /* list of proxy IDs in use */
extern unsigned int error_snapshot_id; /* global ID assigned to each error then incremented */
extern struct ceb_root *proxy_by_name; /* tree of proxies sorted by name */
extern struct eb_root proxy_by_name; /* tree of proxies sorted by name */
extern const struct cfg_opt cfg_opts[];
extern const struct cfg_opt cfg_opts2[];
@ -59,7 +57,6 @@ void free_proxy(struct proxy *p);
const char *proxy_cap_str(int cap);
const char *proxy_mode_str(int mode);
const char *proxy_find_best_option(const char *word, const char **extra);
uint proxy_get_next_id(uint from);
void proxy_store_name(struct proxy *px);
struct proxy *proxy_find_by_id(int id, int cap, int table);
struct proxy *proxy_find_by_name(const char *name, int cap, int table);
@ -122,12 +119,6 @@ static inline struct proxy *proxy_be_by_name(const char *name)
return proxy_find_by_name(name, PR_CAP_BE, 0);
}
/* index proxy <px>'s id into used_proxy_id */
static inline void proxy_index_id(struct proxy *px)
{
ceb32_item_insert(&used_proxy_id, conf.uuid_node, uuid, px);
}
/* this function initializes all timeouts for proxy p */
static inline void proxy_reset_timeouts(struct proxy *proxy)
{
@ -141,37 +132,13 @@ static inline void proxy_reset_timeouts(struct proxy *proxy)
proxy->timeout.tunnel = TICK_ETERNITY;
}
/* Return proxy's abortonclose status: 0=off, non-zero=on, with a default to
* <def> when neither choice was forced.
*/
static inline int proxy_abrt_close_def(const struct proxy *px, int def)
{
if (px->options & PR_O_ABRT_CLOSE)
return 1;
else if (px->no_options & PR_O_ABRT_CLOSE)
return 0;
/* When unset: 1 for HTTP, 0 for TCP */
return def;
}
/* return proxy's abortonclose status: 0=off, non-zero=on.
* Considers the proxy's mode when neither on/off was set,
* and HTTP mode defaults to on.
*/
static inline int proxy_abrt_close(const struct proxy *px)
{
return proxy_abrt_close_def(px, px->mode == PR_MODE_HTTP);
}
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
if (fe->fe_counters.shared.tg) {
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
}
if (l && l->counters && l->counters->shared.tg)
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
}
@ -179,12 +146,11 @@ static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
/* increase the number of cumulated connections accepted by the designated frontend */
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
if (fe->fe_counters.shared.tg) {
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
}
if (l && l->counters && l->counters->shared.tg)
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
}
@ -199,19 +165,16 @@ static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct prox
http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
return;
if (fe->fe_counters.shared.tg)
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
if (be->be_counters.shared.tg) {
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
}
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
@ -226,12 +189,10 @@ static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
return;
if (fe->fe_counters.shared.tg) {
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
}
if (l && l->counters && l->counters->shared.tg)
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
}

View File

@ -35,13 +35,13 @@
#define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)
extern const struct quic_cc_algo quic_cc_algo_nr;
extern const struct quic_cc_algo quic_cc_algo_cubic;
extern const struct quic_cc_algo quic_cc_algo_bbr;
extern const struct quic_cc_algo *default_quic_cc_algo;
extern struct quic_cc_algo quic_cc_algo_nr;
extern struct quic_cc_algo quic_cc_algo_cubic;
extern struct quic_cc_algo quic_cc_algo_bbr;
extern struct quic_cc_algo *default_quic_cc_algo;
/* Fake algorithm with its fixed window */
extern const struct quic_cc_algo quic_cc_algo_nocc;
extern struct quic_cc_algo quic_cc_algo_nocc;
extern unsigned long long last_ts;
@ -90,7 +90,7 @@ enum quic_cc_algo_type {
struct quic_cc {
/* <conn> is there only for debugging purpose. */
struct quic_conn *qc;
const struct quic_cc_algo *algo;
struct quic_cc_algo *algo;
uint32_t priv[144];
};

Some files were not shown because too many files have changed in this diff Show More