Compare commits

..

No commits in common. "master" and "v3.4-dev3" have entirely different histories.

372 changed files with 7972 additions and 23302 deletions

View File

@ -6,42 +6,27 @@ runs:
steps:
- name: Setup coredumps
if: ${{ runner.os == 'Linux' }}
shell: sh
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
shell: bash
run: |
sudo mkdir -p /tmp/core
sudo sysctl fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core/core.%h.%e.%t
sudo sysctl -w fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
- name: Setup ulimit for core dumps
shell: sh
shell: bash
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
ulimit -c unlimited
- name: Get VTest latest commit SHA
id: vtest-sha
shell: sh
run: |
echo "sha=$(git ls-remote https://code.vinyl-cache.org/vtest/VTest2 HEAD | cut -f1)" >> $GITHUB_OUTPUT
- name: Cache VTest
id: cache-vtest
uses: actions/cache@v5
with:
path: ${{ github.workspace }}/vtest
key: vtest-${{ runner.os }}-${{ runner.arch }}-${{ steps.vtest-sha.outputs.sha }}
- name: Install VTest
if: ${{ steps.cache-vtest.outputs.cache-hit != 'true' }}
shell: sh
shell: bash
run: |
DESTDIR=${{ github.workspace }}/vtest scripts/build-vtest.sh
scripts/build-vtest.sh
- name: Install problem matcher for VTest
shell: sh
shell: bash
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"

122
.github/matrix.py vendored
View File

@ -12,7 +12,6 @@ import functools
import json
import re
import sys
import urllib.error
import urllib.request
from os import environ
from packaging import version
@ -20,10 +19,9 @@ from packaging import version
#
# this CI is used for both development and stable branches of HAProxy
#
# naming convention used, if branch/tag name matches:
# naming convention used, if branch name matches:
#
# "haproxy-" - stable branches
# "vX.Y.Z" - release tags
# otherwise - development branch (i.e. "latest" ssl variants, "latest" github images)
#
@ -34,24 +32,13 @@ def get_all_github_tags(url):
headers = {}
if environ.get("GITHUB_TOKEN") is not None:
headers["Authorization"] = "token {}".format(environ.get("GITHUB_TOKEN"))
all_tags = []
page = 1
sep = "&" if "?" in url else "?"
while True:
paginated_url = "{}{}per_page=100&page={}".format(url, sep, page)
request = urllib.request.Request(paginated_url, headers=headers)
try:
response = urllib.request.urlopen(request)
except urllib.error.URLError:
return all_tags if all_tags else None
tags = json.loads(response.read().decode("utf-8"))
if not tags:
break
all_tags.extend([tag['name'] for tag in tags])
if len(tags) < 100:
break
page += 1
return all_tags if all_tags else None
request = urllib.request.Request(url, headers=headers)
try:
tags = urllib.request.urlopen(request)
except:
return None
tags = json.loads(tags.read().decode("utf-8"))
return [tag['name'] for tag in tags]
@functools.lru_cache(5)
def determine_latest_openssl(ssl):
@ -69,7 +56,7 @@ def aws_lc_version_string_to_num(version_string):
return tuple(map(int, version_string[1:].split('.')))
def aws_lc_version_valid(version_string):
return re.match(r'^v[0-9]+(\.[0-9]+)*$', version_string)
return re.match('^v[0-9]+(\.[0-9]+)*$', version_string)
@functools.lru_cache(5)
def determine_latest_aws_lc(ssl):
@ -77,8 +64,6 @@ def determine_latest_aws_lc(ssl):
if not tags:
return "AWS_LC_VERSION=failed_to_detect"
valid_tags = list(filter(aws_lc_version_valid, tags))
if not valid_tags:
return "AWS_LC_VERSION=failed_to_detect"
latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
return "AWS_LC_VERSION={}".format(latest_tag[1:])
@ -86,16 +71,15 @@ def aws_lc_fips_version_string_to_num(version_string):
return tuple(map(int, version_string[12:].split('.')))
def aws_lc_fips_version_valid(version_string):
return re.match(r'^AWS-LC-FIPS-[0-9]+(\.[0-9]+)*$', version_string)
return re.match('^AWS-LC-FIPS-[0-9]+(\.[0-9]+)*$', version_string)
@functools.lru_cache(5)
def determine_latest_aws_lc_fips(ssl):
tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags")
# the AWS-LC-FIPS tags are at the end of the list, so let's get a lot
tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags?per_page=200")
if not tags:
return "AWS_LC_FIPS_VERSION=failed_to_detect"
valid_tags = list(filter(aws_lc_fips_version_valid, tags))
if not valid_tags:
return "AWS_LC_FIPS_VERSION=failed_to_detect"
latest_tag = max(valid_tags, key=aws_lc_fips_version_string_to_num)
return "AWS_LC_FIPS_VERSION={}".format(latest_tag[12:])
@ -103,7 +87,7 @@ def wolfssl_version_string_to_num(version_string):
return tuple(map(int, version_string[1:].removesuffix('-stable').split('.')))
def wolfssl_version_valid(version_string):
return re.match(r'^v[0-9]+(\.[0-9]+)*-stable$', version_string)
return re.match('^v[0-9]+(\.[0-9]+)*-stable$', version_string)
@functools.lru_cache(5)
def determine_latest_wolfssl(ssl):
@ -136,20 +120,16 @@ def clean_compression(compression):
def main(ref_name):
print("Generating matrix for branch '{}'.".format(ref_name))
is_stable = "haproxy-" in ref_name or re.match(r'^v\d+\.\d+\.\d+$', ref_name)
matrix = []
# Ubuntu
if is_stable:
if "haproxy-" in ref_name:
os = "ubuntu-24.04" # stable branch
os_arm = "ubuntu-24.04-arm" # stable branch
os_i686 = "ubuntu-24.04" # stable branch
else:
os = "ubuntu-24.04" # development branch
os_arm = "ubuntu-24.04-arm" # development branch
os_i686 = "ubuntu-24.04" # development branch
TARGET = "linux-glibc"
for CC in ["gcc", "clang"]:
@ -207,7 +187,6 @@ def main(ref_name):
'OPT_CFLAGS="-O1"',
"USE_ZLIB=1",
"USE_OT=1",
"DEBUG=-DDEBUG_STRICT=2",
"OT_INC=${HOME}/opt-ot/include",
"OT_LIB=${HOME}/opt-ot/lib",
"OT_RUNPATH=1",
@ -249,7 +228,7 @@ def main(ref_name):
# "BORINGSSL=yes",
]
if not is_stable: # development branch
if "haproxy-" not in ref_name: # development branch
ssl_versions = ssl_versions + [
"OPENSSL_VERSION=latest",
"LIBRESSL_VERSION=latest",
@ -296,63 +275,24 @@ def main(ref_name):
}
)
# macOS on dev branches
if not is_stable:
# macOS
if "haproxy-" in ref_name:
os = "macos-13" # stable branch
else:
os = "macos-26" # development branch
TARGET = "osx"
for CC in ["clang"]:
matrix.append(
{
"name": "{}, {}, no features".format(os, CC),
"os": os,
"TARGET": TARGET,
"CC": CC,
"FLAGS": [],
}
)
# Alpine / musl
matrix.append(
{
"name": "Alpine+musl, gcc",
"os": "ubuntu-latest",
"container": {
"image": "alpine:latest",
"options": "--privileged --ulimit core=-1 --security-opt seccomp=unconfined",
"volumes": ["/tmp/core:/tmp/core"],
},
"TARGET": "linux-musl",
"CC": "gcc",
"FLAGS": [
"ARCH_FLAGS='-ggdb3'",
"USE_LUA=1",
"LUA_INC=/usr/include/lua5.3",
"LUA_LIB=/usr/lib/lua5.3",
"USE_OPENSSL=1",
"USE_PCRE2=1",
"USE_PCRE2_JIT=1",
"USE_PROMEX=1",
],
}
)
# i686
matrix.append(
{
"name": "{}, i686-linux-gnu-gcc".format(os_i686),
"os": os_i686,
"TARGET": "linux-glibc",
"CC": "i686-linux-gnu-gcc",
"FLAGS": [
"USE_OPENSSL=1",
"USE_PCRE2=1",
"USE_PCRE2_JIT=1",
],
}
)
TARGET = "osx"
for CC in ["clang"]:
matrix.append(
{
"name": "{}, {}, no features".format(os, CC),
"os": os,
"TARGET": TARGET,
"CC": CC,
"FLAGS": [],
}
)
# Print matrix

12
.github/workflows/aws-lc-fips.yml vendored Normal file
View File

@ -0,0 +1,12 @@
name: AWS-LC-FIPS
on:
schedule:
- cron: "0 0 * * 4"
workflow_dispatch:
jobs:
test:
uses: ./.github/workflows/aws-lc-template.yml
with:
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"

94
.github/workflows/aws-lc-template.yml vendored Normal file
View File

@ -0,0 +1,94 @@
name: AWS-LC template
on:
workflow_call:
inputs:
command:
required: true
type: string
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "${{ inputs.command }}")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Run VTest for HAProxy
id: vtest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

View File

@ -5,95 +5,8 @@ on:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
Test:
name: ${{ matrix.name }}
runs-on: ubuntu-latest
strategy:
matrix:
include:
- name: AWS-LC
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
- name: AWS-LC (FIPS)
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "${{ matrix.command }}")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v5
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Run VTest for HAProxy
id: vtest
run: |
make reg-tests VTEST_PROGRAM=${{ github.workspace }}/vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1
test:
uses: ./.github/workflows/aws-lc-template.yml
with:
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"

View File

@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: codespell-project/codespell-problem-matcher@v1.2.0
- uses: codespell-project/actions-codespell@master
with:

View File

@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install h2spec
id: install-h2spec
run: |
@ -45,7 +45,7 @@ jobs:
fi
echo "::endgroup::"
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Launch HAProxy ${{ steps.show-version.outputs.version }}
run: haproxy -f .github/h2spec.config -D
- name: Run h2spec ${{ steps.install-h2spec.outputs.version }}

View File

@ -7,30 +7,19 @@ permissions:
contents: read
jobs:
compile:
name: ${{ matrix.name }}
runs-on: ubuntu-slim
strategy:
matrix:
include:
- name: admin/halog/
targets:
- admin/halog/halog
- name: dev/flags/
targets:
- dev/flags/flags
- name: dev/haring/
targets:
- dev/haring/haring
- name: dev/hpack/
targets:
- dev/hpack/decode
- dev/hpack/gen-enc
- dev/hpack/gen-rht
- name: dev/poll/
targets:
- dev/poll/poll
fail-fast: false
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- run: make ${{ join(matrix.targets, ' ') }}
- uses: actions/checkout@v5
- name: Compile admin/halog/halog
run: |
make admin/halog/halog
- name: Compile dev/flags/flags
run: |
make dev/flags/flags
- name: Compile dev/poll/poll
run: |
make dev/poll/poll
- name: Compile dev/hpack
run: |
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht

View File

@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none

View File

@ -5,7 +5,7 @@ name: Cross Compile
on:
schedule:
- cron: "0 2 * * 1"
- cron: "0 0 21 * *"
workflow_dispatch:
permissions:
@ -17,10 +17,6 @@ jobs:
matrix:
platform: [
{
arch: i686-linux-gnu,
libs: libc6-dev-i386-cross,
target: linux-x86
}, {
arch: aarch64-linux-gnu,
libs: libc6-dev-arm64-cross,
target: linux-aarch64
@ -103,12 +99,12 @@ jobs:
sudo apt-get -yq --force-yes install \
gcc-${{ matrix.platform.arch }} \
${{ matrix.platform.libs }}
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: install quictls
run: |
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=openssl-3.1.7+quic scripts/build-ssl.sh
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
- name: Build
run: |

View File

@ -24,17 +24,22 @@ jobs:
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
container:
image: fedora:rawhide
options: --privileged
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install dependencies
run: |
dnf -y install awk diffutils git zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64 procps-ng
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 libatomic.i686 openssl-devel.i686 pcre2-devel.i686
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 openssl-devel.i686
- uses: ./.github/actions/setup-vtest
- name: Build contrib tools
run: |
make admin/halog/halog
make dev/flags/flags
make dev/poll/poll
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
- name: Compile HAProxy with ${{ matrix.platform.cc }}
run: |
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
make install
- name: Show HAProxy version
id: show-version
@ -43,7 +48,7 @@ jobs:
ldd $(command -v haproxy)
echo "::endgroup::"
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
#
# TODO: review this workaround later
- name: relax crypto policies
@ -54,7 +59,7 @@ jobs:
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
id: vtest
run: |
make reg-tests VTEST_PROGRAM=${{ github.workspace }}/vtest/vtest REGTESTS_TYPES=default,bug,devel
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |

View File

@ -5,16 +5,15 @@ on:
- cron: "0 0 25 * *"
workflow_dispatch:
permissions:
contents: read
jobs:
gcc:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
permissions:
contents: read
steps:
- name: "Checkout repository"
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: "Build on VM"
uses: vmactions/solaris-vm@v1

76
.github/workflows/musl.yml vendored Normal file
View File

@ -0,0 +1,76 @@
name: alpine/musl
on:
push:
permissions:
contents: read
jobs:
musl:
name: gcc
runs-on: ubuntu-latest
container:
image: alpine:latest
options: --privileged --ulimit core=-1 --security-opt seccomp=unconfined
volumes:
- /tmp/core:/tmp/core
steps:
- name: Setup coredumps
run: |
ulimit -c unlimited
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
- uses: actions/checkout@v5
- name: Install dependencies
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
- name: Install VTest
run: scripts/build-vtest.sh
- name: Build
run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
- name: Show version
run: ./haproxy -vv
- name: Show linked libraries
run: ldd haproxy
- name: Install problem matcher for VTest
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest
id: vtest
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
ls /tmp/core/
for file in /tmp/core/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show results
if: ${{ failure() }}
run: |
for folder in /tmp/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

View File

@ -5,16 +5,15 @@ on:
- cron: "0 0 25 * *"
workflow_dispatch:
permissions:
contents: read
jobs:
gcc:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
permissions:
contents: read
steps:
- name: "Checkout repository"
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: "Build on VM"
uses: vmactions/netbsd-vm@v1

View File

@ -13,13 +13,15 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- uses: ./.github/actions/setup-vtest
- name: Install OpenSSL+ECH
run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Install curl+ECH
@ -38,7 +40,7 @@ jobs:
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
@ -49,7 +51,7 @@ jobs:
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=${{ github.workspace }}/vtest/vtest REGTESTS_TYPES=default,bug,devel
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |

View File

@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
@ -35,7 +35,7 @@ jobs:
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
@ -46,7 +46,7 @@ jobs:
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=${{ github.workspace }}/vtest/vtest REGTESTS_TYPES=default,bug,devel
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |

View File

@ -9,58 +9,96 @@ on:
schedule:
- cron: "0 0 * * 2"
permissions:
contents: read
jobs:
combined-build-and-run:
build:
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v5
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker image
id: push
uses: docker/build-push-action@v5
with:
context: https://github.com/haproxytech/haproxy-qns.git
push: true
build-args: |
SSLLIB=AWS-LC
tags: ghcr.io/${{ github.repository }}:aws-lc
- name: Cleanup registry
uses: actions/delete-package-versions@v5
with:
owner: ${{ github.repository_owner }}
package-name: 'haproxy'
package-type: container
min-versions-to-keep: 1
delete-only-untagged-versions: 'true'
run:
needs: build
strategy:
matrix:
suite: [
{ client: chrome, tests: "http3" },
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
{ client: ngtcp2, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" }
]
fail-fast: false
name: ${{ matrix.suite.client }}
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Update Docker to the latest
uses: docker/setup-docker-action@v4
- name: Build Docker image
id: push
uses: docker/build-push-action@v6
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
context: https://github.com/haproxytech/haproxy-qns.git
platforms: linux/amd64
build-args: |
SSLLIB=AWS-LC
tags: local:aws-lc
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install tshark
run: |
sudo apt-get update
sudo apt-get -y install tshark
- name: Pull image
run: |
docker pull ghcr.io/${{ github.repository }}:aws-lc
- name: Run
run: |
git clone https://github.com/quic-interop/quic-interop-runner
cd quic-interop-runner
pip install -r requirements.txt --break-system-packages
python run.py -j result.json -l logs-chrome -r haproxy=local:aws-lc -t "http3" -c chrome -s haproxy
python run.py -j result.json -l logs-picoquic -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c picoquic -s haproxy
python run.py -j result.json -l logs-quic-go -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c quic-go -s haproxy
python run.py -j result.json -l logs-ngtcp2 -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c ngtcp2 -s haproxy
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:aws-lc -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
- name: Delete succeeded logs
if: ${{ failure() }}
if: failure()
run: |
for client in chrome picoquic quic-go ngtcp2; do
pushd quic-interop-runner/logs-${client}/haproxy_${client}
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
popd
done
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
- name: Logs upload
if: ${{ failure() }}
uses: actions/upload-artifact@v7
if: failure()
uses: actions/upload-artifact@v4
with:
name: logs
path: quic-interop-runner/logs*/
name: logs-${{ matrix.suite.client }}
path: quic-interop-runner/logs/
retention-days: 6

View File

@ -9,56 +9,94 @@ on:
schedule:
- cron: "0 0 * * 2"
permissions:
contents: read
jobs:
combined-build-and-run:
build:
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v5
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker image
id: push
uses: docker/build-push-action@v5
with:
context: https://github.com/haproxytech/haproxy-qns.git
push: true
build-args: |
SSLLIB=LibreSSL
tags: ghcr.io/${{ github.repository }}:libressl
- name: Cleanup registry
uses: actions/delete-package-versions@v5
with:
owner: ${{ github.repository_owner }}
package-name: 'haproxy'
package-type: container
min-versions-to-keep: 1
delete-only-untagged-versions: 'true'
run:
needs: build
strategy:
matrix:
suite: [
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
]
fail-fast: false
name: ${{ matrix.suite.client }}
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Update Docker to the latest
uses: docker/setup-docker-action@v4
- name: Build Docker image
id: push
uses: docker/build-push-action@v6
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
context: https://github.com/haproxytech/haproxy-qns.git
platforms: linux/amd64
build-args: |
SSLLIB=LibreSSL
tags: local:libressl
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install tshark
run: |
sudo apt-get update
sudo apt-get -y install tshark
- name: Pull image
run: |
docker pull ghcr.io/${{ github.repository }}:libressl
- name: Run
run: |
git clone https://github.com/quic-interop/quic-interop-runner
cd quic-interop-runner
pip install -r requirements.txt --break-system-packages
python run.py -j result.json -l logs-picoquic -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" -c picoquic -s haproxy
python run.py -j result.json -l logs-quic-go -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" -c quic-go -s haproxy
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:libressl -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
- name: Delete succeeded logs
if: ${{ failure() }}
if: failure()
run: |
for client in picoquic quic-go; do
pushd quic-interop-runner/logs-${client}/haproxy_${client}
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
popd
done
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
- name: Logs upload
if: ${{ failure() }}
uses: actions/upload-artifact@v7
if: failure()
uses: actions/upload-artifact@v4
with:
name: logs
path: quic-interop-runner/logs*/
name: logs-${{ matrix.suite.client }}
path: quic-interop-runner/logs/
retention-days: 6

View File

@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
@ -38,12 +38,12 @@ jobs:
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Run VTest for HAProxy
id: vtest
run: |
make reg-tests VTEST_PROGRAM=${{ github.workspace }}/vtest/vtest REGTESTS_TYPES=default,bug,devel
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |

View File

@ -19,11 +19,11 @@ jobs:
# generated by .github/matrix.py.
generate-matrix:
name: Generate Build Matrix
runs-on: ubuntu-slim
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Generate Build Matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -38,14 +38,13 @@ jobs:
strategy:
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
fail-fast: false
container: ${{ matrix.container }}
env:
# Configure a short TMPDIR to prevent failures due to long unix socket
# paths.
TMPDIR: /tmp
OT_CPP_VERSION: 1.6.0
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
fetch-depth: 100
@ -60,7 +59,7 @@ jobs:
- name: Cache SSL libs
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && !contains(matrix.ssl, 'QUICTLS') }}
id: cache_ssl
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.generate-cache-key.outputs.key }}
@ -68,28 +67,21 @@ jobs:
- name: Cache OpenTracing
if: ${{ contains(matrix.FLAGS, 'USE_OT=1') }}
id: cache_ot
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: '~/opt-ot/'
key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
- name: Add i386 architecture
if: ${{ matrix.CC == 'i686-linux-gnu-gcc' }}
run: sudo dpkg --add-architecture i386
- name: Install apt dependencies
if: ${{ startsWith(matrix.os, 'ubuntu-') && matrix.TARGET != 'linux-musl' }}
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install \
${{ case(contains(matrix.FLAGS, 'USE_LUA=1'), 'liblua5.4-dev', '') }} \
${{ case(contains(matrix.FLAGS, 'USE_PCRE2=1'), 'libpcre2-dev', '') }} \
${{ case(contains(matrix.ssl, 'BORINGSSL=yes'), 'ninja-build', '') }} \
${{ case(matrix.CC == 'i686-linux-gnu-gcc', 'gcc-i686-linux-gnu libc6-dev-i386-cross libssl-dev:i386 libpcre2-dev:i386', '') }} \
${{ contains(matrix.FLAGS, 'USE_LUA=1') && 'liblua5.4-dev' || '' }} \
${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
socat \
gdb \
jose
- name: Install apk dependencies
if: ${{ matrix.TARGET == 'linux-musl' }}
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose sudo
- name: Install brew dependencies
if: ${{ startsWith(matrix.os, 'macos-') }}
run: |
@ -122,6 +114,15 @@ jobs:
${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install-bin
- name: Compile admin/halog/halog
run: |
make -j$(nproc) admin/halog/halog \
ERR=1 \
TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
- name: Show HAProxy version
id: show-version
run: |
@ -135,11 +136,11 @@ jobs:
fi
echo "::endgroup::"
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
id: vtest
run: |
make reg-tests VTEST_PROGRAM=${{ github.workspace }}/vtest/vtest REGTESTS_TYPES=default,bug,devel
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
@ -167,7 +168,8 @@ jobs:
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
for file in /tmp/core/core.*; do
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file

View File

@ -18,7 +18,6 @@ jobs:
msys2:
name: ${{ matrix.name }}
runs-on: ${{ matrix.os }}
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
defaults:
run:
shell: msys2 {0}
@ -36,7 +35,7 @@ jobs:
- USE_THREAD=1
- USE_ZLIB=1
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: msys2/setup-msys2@v2
with:
install: >-

View File

@ -13,13 +13,13 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
- name: Install WolfSSL
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 CFLAGS="-fsanitize=address -g" scripts/build-ssl.sh
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
@ -34,12 +34,12 @@ jobs:
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -vq)" >> $GITHUB_OUTPUT
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Run VTest for HAProxy
id: vtest
run: |
make reg-tests VTEST_PROGRAM=${{ github.workspace }}/vtest/vtest REGTESTS_TYPES=default,bug,devel
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |

906
CHANGELOG
View File

@ -1,912 +1,6 @@
ChangeLog :
===========
2026/04/29 : 3.4-dev10
- DOC: config: fix spelling of "max-threads-per-group" in the index
- MEDIUM: threads: change the default max-threads-per-group value to 16
- BUG/MEDIUM: mux-h2: ignore conn->owner when deciding if a connection is dead
- BUG/MINOR: task: fix uninitialised read in run_tasks_from_lists()
- MINOR: compression: prefix compression oriented functions with "comp_"
- BUG/MINOR: mux_quic: limit avail_streams() to 2^62
- MINOR: h3: simplify GOAWAY local emission
- MEDIUM: h3: prevent new streams on GOAWAY reception
- MINOR: mux-quic: release BE idle conn after GOAWAY reception
- MINOR: otel: added debug thread ID support for the OTel C wrapper library
- MINOR: otel: test: added option parsing to the speed test script
- MINOR: otel: test: replaced argument variables with positional parameters in run scripts
- CLEANUP: otel: removed insecure-fork-wanted requirement
- MINOR: otel: test: unified run scripts into a single symlinked script
- BUILD: haterm: don't pass size_t to %lu in error messages
- CI: github: merge Test and Test-musl in VTest.yml
- CI: Build halog as part of contrib.yml
- BUG/MINOR: xprt_qstrm: read record length in 64bits
- BUG/MINOR: mux_quic: convert QCC rx.rlen to 64bits
- CI: github: revert quictls version on cross-zoo.yml
- BUG/MINOR: xprt_qstrm: reduce max record length check
- CI: github: use quictls-3.1.7 for cross-zoo.yml
- BUILD: ssl/sample: potential null pointer dereference in sample_conv_aes
- CI: github: add an i686 job in cross-zoo.yml
- CI: github: run cross-zoo.yml weekly
- CI: github: add cross-zoo.yml in README.md
- BUG/MEDIUM: checks: Don't forget to set the "alt_proto" field
- CI: github: do not install pcre-devel on Fedora Rawhide build
- CI: github: fix sysctl in fedora-rawhide
- CI: github: switch to USE_PCRE2 in Fedora Rawhide build
- MINOR: acme: implement draft-ietf-acme-profiles
- MINOR: acme: allow IP SAN in certificate request
- BUG/MINOR: log: consider format expression dependencies to decide when to log
- MINOR: sample: make RQ/RS stats available everywhere
- BUG/MINOR: sample: adjust dependencies for channel output bytes counters
- MEDIUM: muxes: always set conn->owner to the session that owns the connection
- MEDIUM: session: always reset the conn->owner on backend when installing mux
- CLEANUP: mux-h1: avoid using conn->owner in uncertain areas
- CLEANUP: mux-h1: remove the unneeded test on conn->owner in h1s_finish_detach()
- BUG/MAJOR: sched: protect task->expire on 32-bit platforms
- CI: github: add an i686 job to the push job
- BUILD: config: also set DEF_MAX_THREADS_PER_GROUP when not using threads
- reg-tests/ssl/ssl_dh.vtc: fix syntax error
- ci: modernize actions/upload-artifact@v4
- BUG/MINOR: reg-tests: make shell syntax errors fatal
- MINOR: cli: Handle the paylod pattern as a pointer in the cmdline buffer
- MEDIUM: cli: Make a buffer for the command payload
- MEDIUM: cli: Add support for dynamically allocated payloads
- MEDIUM: cli: increase the payload pattern up to 64 bytes
- MINOR: stream: Move the HTTP txn in an union
- MINOR: stream: Add flags to identify the stream tansaction when allocated
- MINOR: stream: Use a pcli transaction to replace pcli_* members
- CLEANUP: applet: Remove useless shadow pointer from appctx
- REGTESTS: ssl: mark ssl_dh.vtc as broken
- BUG/MINOR: mux-h2: count a protocol error when failing to parse a trailer
- BUG/MINOR: mux-h2: count a proto error when rejecting a stream on parsing error
- BUG/MEDIUM: tasks: Make sure we don't schedule a task already running
- BUG/MAJOR: net_helper: ip.fp infinite loop on malformed tcp options
- BUG/MINOR: h2: make tune.h2.log-errors actually work
- BUG/MINOR: h2: Don't look at the exclusive bit for PRIORITY frame
- BUG/MINOR: H2: Don't forget to free shared_rx_bufs on failure
- BUG/MINOR: log: also wait for the response when logging response headers
- BUG/MINOR: mux-h1: Fix condition to send null-chunk for bodyless message
- BUG/MINOR: mux-h1: Fix test to skip trailers from chunked messages
- BUG/MINOR: http-act: fix a typo in a "del-heeaders-bin" error message
- CLEANUP: tcpcheck: Fix some typos in comments
- MINOR: tcpcheck: Rely on free_tcpcheck_ruleset() to deinit tcpchecks
- BUG/MINOR: tcpcheck: Don't release ruleset when parsing 'spop-check' ruleset
- BUG/MINOR: tcpcheck: Fix a leak on deinit by releasing ruleset's conf.file
- CLEANUP: haterm: Fix typos in comments
- CLEANUP: config: Fix warning about invalid small buffer size
- CLEANUP: htx: Fix typos in comments
- CLEANUP: chunk: Fix a typo in a comment
- CLEANUP: http-client: Fix typos in comments
- BUG/MEDIUM: tcpcheck: Release temporary small chunk when retrying on http-check
- CLEANUP: proxy: Fix typos in comments
- DOC: config: Fix a typo for "external-check" directive
- CLEANUP: cli: Fix typos in comments
- BUG/MINOR: stream: Add SF_TXN_HTTP/SF_TXN_PCLI flags in strm_show_flags()
- REGTESTS: Never reuse server connection in jwt/jws_verify.vtc
- REGTESTS: Never reuse server connection in server/cli_delete_dynamic_server.vtc
- BUG/MINOR: compression: properly disable request when setting response
- BUG/MINOR: servers: fix last_sess date calculation
- DOC: config: fix typo introduce in max-threads-per-group documentation
- BUG/MINOR: stream: add the newly added SF_TXN_* flags to strm_show_flags()
- BUG/MINOR: debug: properly mark the entire libs archive read-only
- Revert "BUG/MINOR: stream: add the newly added SF_TXN_* flags to strm_show_flags()"
- BUG/MINOR: server: fix a possible leak of an error message in dynamic servers
- BUG/MAJOR: mux-h2: detect incomplete transfers on HEADERS frames as well
- BUG/MEDIUM: mux-h1: Force close mode for bodyless message announcing a C-L
- BUG/MINOR: mux_quic: prevent crash on qc_frm_free() with QMux
- BUG/MINOR: xprt_qstrm: ensure all local TPs are allocated
- BUG/MINOR: xprt_qstrm: prevent crash if conn release on MUX wake
- BUG/MINOR: mux_quic: do not release conn on qcc_recv() for QMux
- MINOR: xprt_qstrm: remove unused subs
- MINOR: connection: document conn_create_mux()
- MINOR: xprt_qstrm: implement close callback
- MINOR: mux_quic: refactor QMux send frames function
- MINOR: mux_quic: use dynamic Tx streams buffers for QMux
- MINOR: mux_quic: use dynamic conn buffers for QMux
- MINOR: mux_quic/xprt_qstrm: simplify Rx buffer transfer
- MINOR: mux_quic: receive MAX_STREAMS_BIDI frames in QMux
- MINOR: mux_quic: handle conn errors on QMux without crash
- MINOR: mux_quic: handle incomplete QMux record read
- BUG/MINOR: tcpcheck: Allow connection reuse without prior traffic
- MINOR: sample: converter for frontend existence check
- BUG/MEDIUM: stats: fix crash on 'dump stats-file'
- BUG/MINOR: ssl: fix memory leaks on realloc failure in ssl_ckch.c
- BUG/MINOR: ssl: fix memory leaks on realloc failure in ssl_sock.c
- BUG/MINOR: ssl: fix memory leak on realloc failure in acme.ips
- DOC: config: Fix log-format example with last rule expressions
- DOC: config: Fix typo in tune.bufsize.large description
- MEDIUM: ot: emitted deprecation warning at filter init
- BUILD: ot: emitted deprecation warning at build time
- BUG/MINOR: ssl: fix double-free on failed realloc in ssl_sock.c
- BUG/MINOR: tree-wide: fix a few user-visible spelling mistakes from dev7
- CLEANUP: tree-wide: address various spelling mistakes in comments from -dev7
- BUG/MINOR: tools: my_memspn/my_memcspn wrong cast causing incorrect byte reading
- BUG/MINOR: tools: fix memory leak in indent_msg() on out of memory
- BUG/MINOR: tools: free previously allocated strings on strdup failure in backup_env()
- BUG/MINOR: sample: fix memory leak in check_when_cond() when ACL is not found
- BUG/MINOR: sample: fix memory leak in smp_resolve_args error paths
- BUG/MINOR: sample: fix NULL strm dereference in sample_conv_when
- BUG/MINOR: peers: fix logical "and" when checking for local in PEER_APP_ST_STARTING
- BUG/MINOR: peers: fix wrong flag reported twice for dump_flags
- CLEANUP: peers: fix a few user-visible spelling mistakes
- CLEANUP: tools: drop upper case check after tolower()
- CLEANUP: mux-h2: remove duplicate forward declaration of h2s_rxbuf_{head,tail}()
- CLEANUP: tree-wide: fix around 20 mistakes in comments in h2,tools,peers
- MINOR: mux_quic: return conn error code in debug string
- MINOR: mux_quic: display QCS sd on traces
- MINOR: mux_quic/h3: report termination events at connection level
- MINOR: mux_quic/h3: report termination events at stream layer
- BUG/MEDIUM: mux_h1: fix stack buffer overflow in h1_append_chunk_size()
- BUG/MINOR: http_ana: use scf to report term_evts in http_wait_for_request()
- MINOR: lb: infrastructure for declarative initialization
- MEDIUM: lb: use the LB ops tables
- MINOR: lb: cleanups
- MINOR: mux_quic: remove superfluous b_size() before b_alloc()
- BUG/MINOR: mux_quic: free frames emitted with QMux
- BUILD: 51d: fix bool definition on dummy lib v4
- CLEANUP: Reapply ist.cocci (4)
- CLEANUP: Reapply strcmp.cocci (3)
- CLEANUP: Reapply ha_free.cocci (2)
- BUG/MAJOR: http-htx: Store new host in a chunk for scheme-based normalization
- BUG/MEDIUM: http-htx: Don't use data from HTX message to update authority
- BUG/MEDIUM: http-htx: Loop on full host value during scheme based normalization
- MEDIUM: http-htx: Make authority update optional when replacing a header value
- MEDIUM: http-htx: Make authority update optional when adding a header
- BUG/MAJOR: http: forbid comma character in authority value
- BUG/MEDIUM: h1: Enforce the authority validation during H1 request parsing
- BUG/MAJOR: mux-h1: Deal with true 64-bits integer to emit chunks size
- BUG/MEDIUM: tasks: Do not loop in task_schedule() if a task is running
- BUG/MINOR: fix various typos and spelling mistakes in user-visible messages
- CLEANUP: tree-wide: fix comment typos all over the tree (~68)
- BUG/MINOR: payload: validate minimum keyshare_len in smp_fetch_ssl_keyshare_groups
- BUG/MINOR: payload: prevent integer overflow in distcc token parsing
- BUG/MINOR: net_helper: fix out-of-bounds read in tcp_fullhdr_find_opt
- BUG/MINOR: net_helper: fix out-of-bounds read in sample_conv_tcp_options_list
- BUG/MINOR: net_helper: fix incomplete decoding in sample_conv_eth_vlan
- BUG/MEDIUM: mux-fcgi: Properly handle full buffer for FCGI_PARAM record
- BUG/MINOR: http-htx: Don't normalize emtpy path for OPTIONS requests
2026/04/15 : 3.4-dev9
- DOC: config: fix ambiguous info in log-steps directive description
- MINOR: filters: add filter name to flt_conf struct
- MEDIUM: filters: add "filter-sequence" directive
- REGTESTS: add a test for "filter-sequence" directive
- Revert "CLEANUP: tcpcheck: Don't needlessly expose proxy_parse_tcpcheck()"
- MINOR: tcpcheck: reintroduce proxy_parse_tcpcheck() symbol
- BUG/MEDIUM: haterm: Move all init functions of haterm in haterm_init.c
- BUG/MEDIUM: mux-h1: Disable 0-copy forwarding when draining the request
- MINOR: servers: The right parameter for idle-pool.shared is "full"
- DOC: config: Fix two typos in the server param "healthcheck" description
- BUG/MINOR: http-act: fix a typo in the "pause" action error message
- MINOR: tcpcheck: Reject unknown keyword during parsing of healthcheck section
- BUG/MEDIUM: tcpcheck/server: Fix parsing of healthcheck param for dynamic servers
- BUG/MINOR: counters: fix unexpected 127 char GUID truncation for shm-stats-file objects
- BUG/MEDIUM: tcpcheck: Properly retrieve tcpcheck type to install the best mux
- BUG/MEDIUM: payload: validate SNI name_len in req.ssl_sni
- BUG/MEDIUM: jwe: fix NULL deref crash with empty CEK and non-dir alg
- BUG/MEDIUM: jwt: fix heap overflow in ECDSA signature DER conversion
- BUG/MEDIUM: jwe: fix memory leak in jwt_decrypt_secret with var argument
- BUG: hlua: fix stack overflow in httpclient headers conversion
- BUG/MINOR: hlua: fix stack overflow in httpclient headers conversion
- BUG/MINOR: hlua: fix format-string vulnerability in Patref error path
- BUG/MEDIUM: chunk: fix typo allocating small trash with bufsize_large
- BUG/MEDIUM: chunk: fix infinite loop in get_larger_trash_chunk()
- BUG/MINOR: peers: fix OOB heap write in dictionary cache update
- CI: VTest build with git clone + cache
- BUG/MEDIUM: connection: Wake the stconn on error when failing to create mux
- CI: github: update to cache@v5
- Revert "BUG: hlua: fix stack overflow in httpclient headers conversion"
- CI: github: fix vtest path to allow correct caching
- CI: github: add the architecture to the cache key for vtest2
- MEDIUM: connections: Really enforce mux protocol requirements
- MINOR: tools: Implement net_addr_type_is_quic()
- MEDIUM: check: Revamp the way the protocol and xprt are determined
- BUG/MAJOR: slz: always make sure to limit fixed output to less than worst case literals
- MINOR: lua: add tune.lua.openlibs to restrict loaded Lua standard libraries
- REGTESTS: lua: add tune.lua.openlibs to all Lua reg-tests
- BUG/MINOR: resolvers: fix memory leak on AAAA additional records
- BUG/MINOR: spoe: fix pointer arithmetic overflow in spoe_decode_buffer()
- BUG/MINOR: http-act: validate decoded lengths in *-headers-bin
- BUG/MINOR: haterm: Return the good start-line for 100-continue interim message
- BUG/MEDIUM: samples: Fix handling of SMP_T_METH samples
- BUG/MINOR: sample: fix info leak in regsub when exp_replace fails
- BUG/MEDIUM: mux-fcgi: prevent record-length truncation with large bufsize
- BUG/MINOR: hlua: fix use-after-free of HTTP reason string
- BUG/MINOR: mux-quic: fix potential NULL deref on qcc_release()
- BUG/MINOR: quic: increment pos pointer on QMux transport params parsing
- MINOR: xprt_qstrm: implement Rx buffering
- MINOR: xprt_qstrm/mux-quic: handle extra QMux frames after params
- MINOR: xprt_qstrm: implement Tx buffering
- MINOR: xprt_qstrm: handle connection errors
- MEDIUM: mux-quic: implement QMux record parsing
- MEDIUM: xprt_qstrm: implement QMux record parsing
- MEDIUM: mux-quic/xprt_qstrm: implement QMux record emission
- DOC: update draft link for QMux protocol
- BUG/MINOR: do not crash on QMux reception of BLOCKED frames
- Revert "BUG/MEDIUM: haterm: Move all init functions of haterm in haterm_init.c"
- BUG/MEDIUM: haterm: Properly initialize the splicing support for haterm
- BUG/MINOR: mux_quic: prevent QMux crash on qcc_io_send() error path
- BUG/MINOR: xprt_qstrm: do not parse record length on read again
- MEDIUM: otel: added OpenTelemetry filter skeleton
- MEDIUM: otel: added configuration and utility layer
- MEDIUM: otel: added configuration parser and event model
- MEDIUM: otel: added post-parse configuration check
- MEDIUM: otel: added memory pool and runtime scope layer
- MEDIUM: otel: implemented filter callbacks and event dispatcher
- MEDIUM: otel: wired OTel C wrapper library integration
- MEDIUM: otel: implemented scope execution and span management
- MEDIUM: otel: added context propagation via carrier interfaces
- MEDIUM: otel: added HTTP header operations for context propagation
- MEDIUM: otel: added HAProxy variable storage for context propagation
- MINOR: otel: added prefix-based variable scanning
- MEDIUM: otel: added CLI commands for runtime filter management
- MEDIUM: otel: added group action for rule-based scope execution
- MINOR: otel: added log-format support to the sample parser and runtime
- MINOR: otel: test: added test and benchmark suite for the OTel filter
- MINOR: otel: added span link support
- MINOR: otel: added metrics instrument support
- MINOR: otel: added log-record signal support
- MINOR: otel: test: added full-event test config
- DOC: otel: added documentation
- DOC: otel: test: added test README-* files
- DOC: otel: test: added speed test guide and benchmark results
- DOC: otel: added cross-cutting design patterns document
- MINOR: otel: added flt_otel_sample_eval and exposed flt_otel_sample_add_kv
- MINOR: otel: changed log-record attr to use sample expressions
- MINOR: otel: changed instrument attr to use sample expressions
- DOC: otel: added README.md overview document
- CLEANUP: ot: use the item API for the variables trees
- BUG/MINOR: ot: removed dead code in flt_ot_parse_cfg_str()
- BUG/MINOR: ot: fixed wrong NULL check in flt_ot_parse_cfg_group()
- BUILD: ot: removed explicit include path when building opentracing filter
- MINOR: ot: renamed the variable dbg_indent_level to flt_ot_dbg_indent_level
- CI: Drop obsolete `packages: write` permission from `quic-interop-*.yml`
- CI: Consistently add a top-level `permissions` definition to GHA workflows
- CI: Wrap all `if:` conditions in `${{ }}`
- CI: Fix regular expression escaping in matrix.py
- CI: Update to actions/checkout@v6
- CI: Simplify version extraction with `haproxy -vq`
- CI: Merge `aws-lc.yml` and `aws-lc-fips.yml` into `aws-lc.yml`
- CI: Merge `aws-lc-template.yml` into `aws-lc.yml`
- CI: Consistently set up VTest with `./.github/actions/setup-vtest`
- MINOR: mux_quic: remove duplicate QMux local transport params
- CI: github: add bash to the musl job
- BUG/MINOR: quic: do not use hardcoded values in QMux TP frame builder
- BUG/MINOR: log: Fix error message when using unavailable fetch in logfmt
- CLEANUP: log: Return `size_t` from `sess_build_logline_orig()`
- CLEANUP: stream: Explain the two-step initialization in `stream_generate_unique_id()`
- CLEANUP: stream: Reduce duplication in `stream_generate_unique_id()`
- CLEANUP: http_fetch: Use local `unique_id` variable in `smp_fetch_uniqueid()`
- CI: build WolfSSL job with asan enabled
- MINOR: tools: memvprintf(): remove <out> check that always true
- BUG/MEDIUM: cli: Properly handle too big payload on a command line
- REGTESTS: Never reuse server connection in reg-tests/jwt/jwt_decrypt.vtc
- MINOR: errors: remove excessive errmsg checks
- BUG/MINOR: haterm: preserve the pipe size margin for splicing
- MEDIUM: acme: implement dns-persist-01 challenge
- MINOR: acme: extend resolver-based DNS pre-check to dns-persist-01
- DOC: configuration: document dns-persist-01 challenge type and options
- BUG/MINOR: acme: read the wildcard flag from the authorization response
- BUG/MINOR: acme: don't pass NULL into format string
- BUG/MINOR: haterm: don't apply the default pipe size margin twice
- CLEANUP: Make `lf_expr` parameter of `sess_build_logline_orig()` const
- MINOR: Add `generate_unique_id()` helper
- MINOR: Allow inlining of `stream_generate_unique_id()`
- CLEANUP: log: Stop touching `struct stream` internals for `%ID`
- MINOR: check: Support generating a `unique_id` for checks
- MINOR: http_fetch: Add support for checks to `unique-id` fetch
- MINOR: acme: display the type of challenge in ACME_INITIAL_DELAY
- MINOR: mjson: reintroduce mjson_next()
- CI: Remove obsolete steps from musl.yml
- CI: Use `sh` in `actions/setup-vtest/action.yml`
- CI: Sync musl.yml with vtest.yml
- CI: Integrate Musl build into vtest.yml
- CI: Use `case()` function
- CI: Generate vtest.yml matrix on `ubuntu-slim`
- CI: Run contrib.yml on `ubuntu-slim`
- CI: Use `matrix:` in contrib.yml
- CI: Build `dev/haring/` as part of contrib.yml
- MINOR: htx: Add helper function to get type and size from the block info field
- BUG/MEDIUM: htx: Properly handle block modification during defragmentation
- BUG/MEDIUM: htx: Don't count delta twice when block value is replaced
- MINOR: ssl: add TLS 1.2 values in HAPROXY_KEYLOG_XX_LOG_FMT
- EXAMPLES: ssl: keylog entries are greater than 1024
- BUILD: Makefile: don't forget to also delete haterm on make clean
- MINOR: stats: report the number of thread groups in "show info"
- CLEANUP: sample: fix the comment regarding the range of the thread sample fetch
- MINOR: sample: return the number of the current thread group
- MINOR: sample: add new sample fetch functions reporting current CPU usage
- BUG/MEDIUM: peers: trash of expired entries delayed after fullresync
- DOC: remove the alpine/musl status job image
- MINOR: mux-quic: improve documentation for qcs_attach_sc()
- MINOR: mux-quic: reorganize code for app init/shutdown
- MINOR: mux-quic: perform app init in case of early shutdown
- MEDIUM: quic: implement fe.stream.max-total
- MINOR: mux-quic: close connection when reaching max-total streams
- REGTESTS: add QUIC test for max-total streams setting
- MEDIUM: threads: start threads by groups
- MINOR: acme: opportunistic DNS check for dns-persist-01 to skip challenge-ready steps
- BUG/MINOR: acme: fix fallback state after failed initial DNS check
- CLEANUP: acme: no need to reset ctx state and http_state before nextreq
- BUG/MINOR: threads: properly set the number of tgroups when non using policy
2026/04/03 : 3.4-dev8
- MINOR: log: split do_log() in do_log() + do_log_ctx()
- MINOR: log: provide a way to override logger->profile from process_send_log_ctx
- MINOR: log: support optional 'profile <log_profile_name>' argument to do-log action
- BUG/MINOR: sock: adjust accept() error messages for ENFILE and ENOMEM
- BUG/MINOR: qpack: fix 62-bit overflow and 1-byte OOB reads in decoding
- MEDIUM: sched: do not run a same task multiple times in series
- MINOR: sched: do not requeue a tasklet into the current queue
- MINOR: sched: do not punish self-waking tasklets anymore
- MEDIUM: sched: do not punish self-waking tasklets if TASK_WOKEN_ANY
- MEDIUM: sched: change scheduler budgets to lower TL_BULK
- MINOR: mux-h2: assign a limited frames processing budget
- BUILD: sched: fix leftover of debugging test in single-run changes
- BUG/MEDIUM: acme: fix multiple resource leaks in acme_x509_req()
- MINOR: http_htx: use enum for arbitrary values in conf_errors
- MINOR: http_htx: rename fields in struct conf_errors
- MINOR: http_htx: split check/init of http_errors
- MINOR/OPTIM: http_htx: lookup once http_errors section on check/init
- MEDIUM: proxy: remove http-errors limitation for dynamic backends
- BUG/MINOR: acme: leak of ext_san upon insertion error
- BUG/MINOR: acme: wrong error when checking for duplicate section
- BUG/MINOR: acme/cli: wrong argument check in 'acme renew'
- BUG/MINOR: http_htx: fix null deref in http-errors config check
- MINOR: buffers: Move small buffers management from quic to dynbuf part
- MINOR: dynbuf: Add helper functions to alloc large and small buffers
- MINOR: quic: Use b_alloc_small() to allocate a small buffer
- MINOR: config: Relax tests on the configured size of small buffers
- MINOR: config: Report the warning when invalid large buffer size is set
- MEDIUM: htx: Add htx_xfer function to replace htx_xfer_blks
- MINOR: htx: Add helper functions to xfer a message to smaller or larger one
- MINOR: http-ana: Use HTX API to move to a large buffer
- MEDIUM: chunk: Add support for small chunks
- MEDIUM: stream: Try to use a small buffer for HTTP request on queuing
- MEDIUM: stream: Try to use small buffer when TCP stream is queued
- MEDIUM: stconn: Use a small buffer if possible for L7 retries
- MEDIUM: tree-wide: Rely on htx_xfer() instead of htx_xfer_blks()
- Revert "BUG/MEDIUM: mux-h2: make sure to always report pending errors to the stream"
- MEDIUM: mux-h2: Stop dealing with HTX flags transfer in h2_rcv_buf()
- MEDIUM: tcpcheck: Use small buffer if possible for healthchecks
- MINOR: proxy: Review options flags used to configure healthchecks
- DOC: config: Fix alphabetical ordering of proxy options
- DOC: config: Fix alphabetical ordering of external-check directives
- MINOR: proxy: Add use-small-buffers option to set where to use small buffers
- DOC: config: Add missing 'status-code' param for 'http-check expect' directive
- DOC: config: Reorder params for 'tcp-check expect' directive
- BUG/MINOR: acme: NULL check on my_strndup()
- BUG/MINOR: acme: free() DER buffer on a2base64url error path
- BUG/MINOR: acme: replace atol with len-bounded __strl2uic() for retry-after
- BUG/MINOR: acme/cli: fix argument check and error in 'acme challenge_ready'
- BUILD: tools: potential null pointer dereference in dl_collect_libs_cb
- BUG/MINOR: ech: permission checks on the CLI
- BUG/MINOR: acme: permission checks on the CLI
- BUG/MEDIUM: check: Don't reuse the server xprt if we should not
- MINOR: checks: Store the protocol to be used in struct check
- MINOR: protocols: Add a new proto_is_quic() function
- MEDIUM: connections: Enforce mux protocol requirements
- MEDIUM: server: remove a useless memset() in srv_update_check_addr_port.
- BUG/MINOR: config: Warn only if warnif_cond_conflicts report a conflict
- BUG/MINOR: config: Properly test warnif_misplaced_* return values
- BUG/MINOR: http-ana: Only consider client abort for abortonclose
- BUG/MEDIUM: acme: skip doing challenge if it is already valid
- MINOR: connections: Enhance tune.idle-pool.shared
- BUG/MINOR: acme: fix task allocation leaked upon error
- BUG/MEDIUM: htx: Fix htx_xfer() to consume more data than expected
- CI: github: fix tag listing by implementing proper API pagination
- CLEANUP: fix typos and spelling in comments and documentation
- BUG/MINOR: quic: close conn on packet reception with incompatible frame
- CLEANUP: stconn: Remove usless sc_new_from_haterm() declaration
- BUG/MINOR: stconn: Always declare the SC created from healthchecks as a back SC
- MINOR: stconn: flag the stream endpoint descriptor when the app has started
- MINOR: mux-h2: report glitches on early RST_STREAM
- BUG/MINOR: net_helper: fix length controls on ip.fp tcp options parsing
- BUILD: net_helper: fix unterminated comment that broke the build
- MINOR: resolvers: basic TXT record implementation
- MINOR: acme: store the TXT record in auth->token
- MEDIUM: acme: add dns-01 DNS propagation pre-check
- MEDIUM: acme: new 'challenge-ready' option
- DOC: configuration: document challenge-ready and dns-delay options for ACME
- SCRIPTS: git-show-backports: list new commits and how to review them with -L
- BUG/MEDIUM: ssl/cli: tls-keys commands warn when accessed without admin level
- BUG/MEDIUM: ssl/ocsp: ocsp commands warn when accessed without admin level
- BUG/MEDIUM: map/cli: map/acl commands warn when accessed without admin level
- BUG/MEDIUM: ssl/cli: tls-keys commands are missing permission checks
- BUG/MEDIUM: ssl/ocsp: ocsp commands are missing permission checks
- BUG/MEDIUM: map/cli: CLI commands lack admin permission checks
- DOC: configuration: mention QUIC server support
- MEDIUM: Add set-headers-bin, add-headers-bin and del-headers-bin actions
- BUG/MEDIUM: mux-h1: Don't set MSG_MORE on bodyless responses forwarded to client
- BUG/MINOR: http_act: Properly handle decoding errors in *-headers-bin actions
- MEDIUM: stats: Hide the version by default and add stats-showversion
- MINOR: backends: Don't update last_sess if it did not change
- MINOR: servers: Don't update last_sess if it did not change
- MINOR: ssl/log: add keylog format variables and env vars
- DOC: configuration: update tune.ssl.keylog URL to IETF draft
- BUG/MINOR: http_act: Make set/add-headers-bin compatible with ACL conditions
- MINOR: action: Add a sample expression field in arguments used by HTTP actions
- MEDIUM: http_act: Rework *-headers-bin actions
- BUG/MINOR: tcpcheck: Remove unexpected flag on tcpcheck rules for httpchk option
- MEDIUM: tcpcheck: Refactor how tcp-check rulesets are stored
- MINOR: tcpcheck: Deal with disable-on-404 and send-state in the tcp-check itself
- BUG/MINOR: tcpcheck: Don't enable http_needed when parsing HTTP samples
- MINOR: tcpcheck: Use tcpcheck flags to know a healthcheck uses SSL connections
- BUG/MINOR: tcpcheck: Use tcpcheck context for expressions parsing
- CLEANUP: tcpcheck: Don't needlessly expose proxy_parse_tcpcheck()
- MINOR: tcpcheck: Add a function to stringify the healthcheck type
- MEDIUM: tcpcheck: Split parsing functions to prepare healthcheck sections parsing
- MEDIUM: tcpcheck: Add parsing support for healthcheck sections
- MINOR: tcpcheck: Extract tcpheck ruleset post-config in a dedicated function
- MEDIUM: tcpcheck/server: Add healthcheck server keyword
- REGTESTS: tcpcheck: Add a script to check healthcheck section
- MINOR: acme: add 'dns-timeout' keyword for dns-01 challenge
- CLEANUP: net_helper: fix typo in comment
- MINOR: acme: set the default dns-delay to 30s
- MINOR: connection: add function to identify a QUIC connection
- MINOR: quic: refactor frame parsing
- MINOR: quic: refactor frame encoding
- BUG/MINOR: quic: fix documentation for transport params decoding
- MINOR: quic: split transport params decoding/check
- MINOR: quic: remove useless quic_tp_dec_err type
- MINOR: quic: define QMux transport parameters frame type
- MINOR: quic: implement QMux transport params frame parser/builder
- MINOR: mux-quic: move qcs stream member into tx inner struct
- MINOR: mux-quic: prepare Tx support for QMux
- MINOR: mux-quic: convert init/closure for QMux compatibility
- MINOR: mux-quic: protect qcc_io_process for QMux
- MINOR: mux-quic: prepare traces support for QMux
- MINOR: quic: abstract stream type in qf_stream frame
- MEDIUM: mux-quic: implement QMux receive
- MINOR: mux-quic: handle flow-control frame on qstream read
- MINOR: mux-quic: define Rx connection buffer for QMux
- MINOR: mux_quic: implement qstrm rx buffer realign
- MEDIUM: mux-quic: implement QMux send
- MINOR: mux-quic: implement qstream send callback
- MINOR: mux-quic: define Tx connection buffer for QMux
- MINOR: xprt_qstrm: define new xprt module for QMux protocol
- MINOR: xprt_qstrm: define callback for ALPN retrieval
- MINOR: xprt_qstrm: implement reception of transport parameters
- MINOR: xprt_qstrm: implement sending of transport parameters
- MEDIUM: ssl: load xprt_qstrm after handshake completion
- MINOR: mux-quic: use QMux transport parameters from qstrm xprt
- MAJOR: mux-quic: activate QMux for frontend side
- MAJOR: mux-quic: activate QMux on the backend side
- MINOR: acme: split the CLI wait from the resolve wait
- MEDIUM: acme: initialize the dns timer starting from the first DNS request
- DEBUG: connection/flags: add QSTRM flags for the decoder
- BUG/MINOR: mux_quic: fix uninit for QMux emission
- MINOR: acme: remove remaining CLI wait in ACME_RSLV_TRIGGER
- MEDIUM: acme: split the initial delay from the retry DNS delay
- BUG/MINOR: cfgcond: properly set the error pointer on evaluation error
- BUG/MINOR: cfgcond: always set the error string on openssl_version checks
- BUG/MINOR: cfgcond: always set the error string on awslc_api checks
- BUG/MINOR: cfgcond: fail cleanly on missing argument for "feature"
- MINOR: ssl: add the ssl_fc_crtname sample fetch
- MINOR: haterm: Change hstream_add_data() to prepare zero-copy data forwarding
- MEDIUM: haterm: Add support for 0-copy data forwarding and option to disable it
- MEDIUM: haterm: Prepare support for splicing by initializing a master pipe
- MEDIUM: haterm: Add support for splicing and option to disable it
- MINOR: haterm: Handle boolean request options as flags
- MINOR: haterm: Add a request option to disable splicing
- BUG/MINOR: ssl: fix memory leak in ssl_fc_crtname by using SSL_CTX ex_data index
2026/03/20 : 3.4-dev7
- BUG/MINOR: stconn: Increase SC bytes_out value in se_done_ff()
- BUG/MINOR: ssl-sample: Fix sample_conv_sha2() by checking EVP_Digest* failures
- BUG/MINOR: backend: Don't get proto to use for webscoket if there is no server
- BUG/MINOR: jwt: Missing 'jwt_tokenize' return value check
- MINOR: flt_http_comp: define and use proxy_get_comp() helper function
- MEDIUM: flt_http_comp: split "compression" filter in 2 distinct filters
- CLEANUP: flt_http_comp: comp_state doesn't bother about the direction anymore
- BUG/MINOR: admin: haproxy-reload use explicit socat address type
- MEDIUM: admin: haproxy-reload conversion to POSIX sh
- BUG/MINOR: admin: haproxy-reload rename -vv long option
- SCRIPTS: git-show-backports: hide the common ancestor warning in quiet mode
- SCRIPTS: git-show-backports: add a restart-from-last option
- MINOR: mworker: add a BUG_ON() on mproxy_li in _send_status
- BUG/MINOR: mworker: don't set the PROC_O_LEAVING flag on master process
- Revert "BUG/MINOR: jwt: Missing 'jwt_tokenize' return value check"
- MINOR: jwt: Improve 'jwt_tokenize' function
- MINOR: jwt: Convert EC JWK to EVP_PKEY
- MINOR: jwt: Parse ec-specific fields in jose header
- MINOR: jwt: Manage ECDH-ES algorithm in jwt_decrypt_jwk function
- MINOR: jwt: Add ecdh-es+axxxkw support in jwt_decrypt_jwk converter
- MINOR: jwt: Manage ec certificates in jwt_decrypt_cert
- DOC: jwt: Add ECDH support in jwt_decrypt converters
- MINOR: stconn: Call sc_conn_process from the I/O callback if TASK_WOKEN_MSG state was set
- MINOR: mux-h2: Rely on h2s_notify_send() when resuming h2s for sending
- MINOR: mux-spop: Rely on spop_strm_notify_send() when resuming streams for sending
- MINOR: muxes: Wakeup the data layer from a mux stream with TASK_WOKEN_IO state
- MAJOR: muxes: No longer use app_ops .wake() callback function from muxes
- MINOR: applet: Call sc_applet_process() instead of .wake() callback function
- MINOR: connection: Call sc_conn_process() instead of .wake() callback function
- MEDIUM: stconn: Remove .wake() callback function from app_ops
- MINOR: check: Remove wake_srv_chk() function
- MINOR: haterm: Remove hstream_wake() function
- MINOR: stconn: Wakeup the SC with TASK_WOKEN_IO state from opposite side
- MEDIUM: stconn: Merge all .chk_rcv() callback functions in sc_chk_rcv()
- MINOR: stconn: Remove .chk_rcv() callback functions
- MEDIUM: stconn: Merge all .chk_snd() callback functions in sc_chk_snd()
- MINOR: stconn: Remove .chk_snd() callback functions
- MEDIUM: stconn: Merge all .abort() callback functions in sc_abort()
- MINOR: stconn: Remove .abort() callback functions
- MEDIUM: stconn: Merge all .shutdown() callback functions in sc_shutdown()
- MINOR: stconn: Remove .shutdown() callback functions
- MINOR: stconn: Totally remove app_ops from the stconns
- MINOR: stconn: Simplify sc_abort/sc_shutdown by merging calls to se_shutdown
- DEBUG: stconn: Add a CHECK_IF() when I/O are performed on an orphan SC
- MEDIUM: mworker: exiting when couldn't find the master mworker_proc element
- BUILD: ssl: use ASN1_STRING accessors for OpenSSL 4.0 compatibility
- BUILD: ssl: make X509_NAME usage OpenSSL 4.0 ready
- BUG/MINOR: tcpcheck: Fix typo in error message for `http-check expect`
- BUG/MINOR: jws: fix memory leak in jws_b64_signature
- DOC: configuration: http-check expect example typo
- DOC/CLEANUP: config: update mentions of the old "Global parameters" section
- BUG/MEDIUM: ssl: Handle receiving early data with BoringSSL/AWS-LC
- BUG/MINOR: mworker: always stop the receiving listener
- BUG/MEDIUM: ssl: Don't report read data as early data with AWS-LC
- BUILD: makefile: fix range build without test command
- BUG/MINOR: memprof: avoid a small memory leak in "show profiling"
- BUG/MINOR: proxy: do not forget to validate quic-initial rules
- MINOR: activity: use dynamic allocation for "show profiling" entries
- MINOR: tools: extend the pointer hashing code to ease manipulations
- MINOR: tools: add a new pointer hash function that also takes an argument
- MINOR: memprof: attempt different retry slots for different hashes on collision
- MINOR: tinfo: start to add basic thread_exec_ctx
- MINOR: memprof: prepare to consider exec_ctx in reporting
- MINOR: memprof: also permit to sort output by calling context
- MINOR: tools: add a function to write a thread execution context.
- MINOR: debug: report the execution context on thread dumps
- MINOR: memprof: report the execution context on profiling output
- MINOR: initcall: record the file and line declaration of an INITCALL
- MINOR: tools: decode execution context TH_EX_CTX_INITCALL
- MINOR: tools: support decoding ha_caller type exec context
- MINOR: sample: store location for fetch/conv via initcalls
- MINOR: sample: also report contexts registered directly
- MINOR: tools: support an execution context that is just a function
- MINOR: actions: store the location of keywords registered via initcalls
- MINOR: actions: also report execution contexts registered directly
- MINOR: filters: set the exec context to the current filter config
- MINOR: ssl: set the thread execution context during message callbacks
- MINOR: connection: track mux calls to report their allocation context
- MINOR: task: set execution context on task/tasklet calls
- MINOR: applet: set execution context on applet calls
- MINOR: cli: keep the info of the current keyword being processed in the appctx
- MINOR: cli: keep track of the initcall context since kw registration
- MINOR: cli: implement execution context for manually registered keywords
- MINOR: activity: support aggregating by caller also for memprofile
- MINOR: activity: raise the default number of memprofile buckets to 4k
- DOC: internals: short explanation on how thread_exec_ctx works
- BUG/MINOR: mworker: only match worker processes when looking for unspawned proc
- MINOR: traces: defer processing of "-dt" options
- BUG/MINOR: mworker: fix typo &= instead of & in proc list serialization
- BUG/MINOR: mworker: set a timeout on the worker socketpair read at startup
- BUG/MINOR: mworker: avoid passing NULL version in proc list serialization
- BUG/MINOR: sockpair: set FD_CLOEXEC on fd received via SCM_RIGHTS
- BUG/MEDIUM: stconn: Don't forget to wakeup applets on shutdown
- BUG/MINOR: spoe: Properly switch SPOE filter to WAITING_ACK state
- BUG/MEDIUM: spoe: Properly abort processing on client abort
- BUG/MEDIUM: stconn: Fix abort on close when a large buffer is used
- BUG/MEDIUM: stconn: Don't perform L7 retries with large buffer
- BUG/MINOR: h2/h3: Only test number of trailers inserted in HTX message
- MINOR: htx: Add function to truncate all blocks after a specific block
- BUG/MINOR: h2/h3: Never insert partial headers/trailers in an HTX message
- BUG/MINOR: http-ana: Swap L7 buffer with request buffer by hand
- BUG/MINOR: stream: Fix crash in stream dump if the current rule has no keyword
- BUG/MINOR: mjson: make mystrtod() length-aware to prevent out-of-bounds reads
- MEDIUM: stats-file/clock: automatically update now_offset based on shared clock
- MINOR: promex: export "haproxy_sticktable_local_updates" metric
- BUG/MINOR: spoe: Fix condition to abort processing on client abort
- BUILD: spoe: Remove unused variable
- MINOR: tools: add a function to create a tar file header
- MINOR: tools: add a function to load a file into a tar archive
- MINOR: config: support explicit "on" and "off" for "set-dumpable"
- MINOR: debug: read all libs in memory when set-dumpable=libs
- DEV: gdb: add a new utility to extract libs from a core dump: libs-from-core
- MINOR: debug: copy debug symbols from /usr/lib/debug when present
- MINOR: debug: opportunistically load libthread_db.so.1 with set-dumpable=libs
- BUG/MINOR: mworker: don't try to access an initializing process
- BUG/MEDIUM: peers: enforce check on incoming table key type
- BUG/MINOR: mux-h2: properly ignore R bit in GOAWAY stream ID
- BUG/MINOR: mux-h2: properly ignore R bit in WINDOW_UPDATE increments
- OPTIM: haterm: use chunk builders for generated response headers
- BUG/MAJOR: h3: check body size with content-length on empty FIN
- BUG/MEDIUM: h3: reject unaligned frames except DATA
- BUG/MINOR: mworker/cli: fix show proc pagination losing entries on resume
- CI: github: treat vX.Y.Z release tags as stable like haproxy-* branches
- MINOR: freq_ctr: add a function to add values with a peak
- MINOR: task: maintain a per-thread indicator of the peak run-queue size
- MINOR: mux-h2: store the concurrent streams hard limit in the h2c
- MINOR: mux-h2: permit to moderate the advertised streams limit depending on load
- MINOR: mux-h2: permit to fix a minimum value for the advertised streams limit
- BUG/MINOR: mworker: fix sort order of mworker_proc in 'show proc'
- CLEANUP: mworker: fix tab/space mess in mworker_env_to_proc_list()
2026/03/05 : 3.4-dev6
- CLEANUP: acme: remove duplicate includes
- BUG/MINOR: proxy: detect strdup error on server auto SNI
- BUG/MINOR: server: set auto SNI for dynamic servers
- BUG/MINOR: server: enable no-check-sni-auto for dynamic servers
- MINOR: haterm: provide -b and -c options (RSA key size, ECDSA curves)
- MINOR: haterm: add long options for QUIC and TCP "bind" settings
- BUG/MINOR: haterm: missing allocation check in copy_argv()
- BUG/MINOR: quic: fix counters used on BE side
- MINOR: quic: add BUG_ON() on half_open_conn counter access from BE
- BUG/MINOR: quic/h3: display QUIC/H3 backend module on HTML stats
- BUG/MINOR: acme: acme_ctx_destroy() leaks auth->dns
- BUG/MINOR: acme: wrong labels logic always memprintf errmsg
- MINOR: ssl: clarify error reporting for unsupported keywords
- BUG/MINOR: acme: fix incorrect number of arguments allowed in config
- CLEANUP: haterm: remove unreachable labels hstream_add_data()
- CLEANUP: haterm: avoid static analyzer warnings about rand() use
- CLEANUP: ssl: Remove a useless variable from ssl_gen_x509()
- CI: use the latest docker for QUIC Interop
- CI: remove redundant "halog" compilation
- CLEANUP: cfgparse: accept-invalid-http-* does not support "no"/"defaults"
- BUG/MEDIUM: spoe: Acquire context buffer in applet before consuming a frame
- MINOR: traces: always mark trace_source as thread-aligned
- MINOR: ncbmbuf: improve itbmap_next() code
- MINOR: proxy: improve code when checking server name conflicts
- MINOR: quic: add a new metric for ncbuf failures
- BUG/MINOR: haterm: cannot reset default "haterm" mode
- BUG/MEDIUM: cpu-topo: Distribute CPUs fairly across groups
- BUG/MINOR: quic: missing app ops init during backend 0-RTT sessions
- CLEANUP: ssl: remove outdated comments
- MINOR: mux-h2: also count glitches on invalid trailers
- MINOR: mux-h2: add a new setting, "tune.h2.log-errors" to tweak error logging
- BUG/MEDIUM: mux-h2: make sure to always report pending errors to the stream
- BUG/MINOR: server: adjust initialization order for dynamic servers
- CLEANUP: tree-wide: drop a few useless null-checks before free()
- CLEANUP: quic-stats: include counters from quic_stats
- REORG: stats/counters: move extra_counters to counters not stats
- CLEANUP: stats: drop stats.h / stats-t.h where not needed
- MEDIUM: counters: change the fill_stats() API to pass the module and extra_counters
- CLEANUP: counters: only retrieve zeroes for unallocated extra_counters
- MEDIUM: counters: add a dedicated storage for extra_counters in various structs
- MINOR: counters: store a tgroup step for extra_counters to access multiple tgroups
- MEDIUM: counters: store the number of thread groups accessing extra_counters
- MINOR: counters: add EXTRA_COUNTERS_BASE() to retrieve extra_counters base storage
- MEDIUM: counters: return aggregate extra counters in ->fill_stats()
- MEDIUM: counters: make EXTRA_COUNTERS_GET() consider tgid
- BUG/MINOR: call EXTRA_COUNTERS_FREE() before srv_free_params() in srv_drop()
- MINOR: promex: test applet resume in stress mode
- BUG/MINOR: promex: fix server iteration when last server is deleted
- BUG/MINOR: proxy: add dynamic backend into ID tree
- MINOR: proxy: convert proxy flags to uint
- MINOR: server: refactor srv_detach()
- MINOR: proxy: define a basic "del backend" CLI
- MINOR: proxy: define proxy watcher member
- MINOR: stats: protect proxy iteration via watcher
- MINOR: promex: use watcher to iterate over backend instances
- MINOR: lua: use watcher for proxies iterator
- MINOR: proxy: add refcount to proxies
- MINOR: proxy: rename default refcount to avoid confusion
- MINOR: server: take proxy refcount when deleting a server
- MINOR: lua: handle proxy refcount
- MINOR: proxy: prevent backend removal when unsupported
- MINOR: proxy: prevent deletion of backend referenced by config elements
- MINOR: proxy: prevent backend deletion if server still exists in it
- MINOR: server: mark backend removal as forbidden if QUIC was used
- MINOR: cli: implement wait on be-removable
- MINOR: proxy: add comment for defaults_px_ref/unref_all()
- MEDIUM: proxy: add lock for global accesses during proxy free
- MEDIUM: proxy: add lock for global accesses during default free
- MINOR: proxy: use atomic ops for default proxy refcount
- MEDIUM: proxy: implement backend deletion
- REGTESTS: add a test on "del backend"
- REGTESTS: complete "del backend" with unnamed defaults ref free
- BUG/MINOR: hlua: fix return with push nil on proxy check
- BUG/MEDIUM: stream: Handle TASK_WOKEN_RES as a stream event
- MINOR: quic: use signed char type for ALPN manipulation
- MINOR: quic/h3: reorganize stream reject after MUX closure
- MINOR: mux-quic: add function for ALPN to app-ops conversion
- MEDIUM: quic/mux-quic: adjust app-ops install
- MINOR: quic: use server cache for ALPN on BE side
- BUG/MEDIUM: hpack: correctly deal with too large decoded numbers
- BUG/MAJOR: qpack: unchecked length passed to huffman decoder
- BUG/MINOR: qpack: fix 1-byte OOB read in qpack_decode_fs_pfx()
- BUG/MINOR: quic: fix OOB read in preferred_address transport parameter
- BUG/MEDIUM: qpack: correctly deal with too large decoded numbers
- BUG/MINOR: hlua: Properly enable/disable line receives from HTTP applet
- BUG/MEDIUM: hlua: Fix end of request detection when retrieving payload
- BUG/MINOR: hlua: Properly enable/disable receives for TCP applets
- MINOR: htx: Add a function to retrieve the HTTP version from a start-line
- MINOR: h1-htx: Reports non-HTTP version via dedicated flags
- BUG/MINOR: h1-htx: Be sure that H1 response version starts by "HTTP/"
- MINOR: http-ana: Save the message version in the http_msg structure
- MEDIUM: http-fetch: Rework how HTTP message version is retrieved
- MEDIUM: http-ana: Use the version of the opposite side for internal messages
- DEBUG: stream: Display the currently running rule in stream dump
- MINOR: filters: Use filter API as far as possible to break loops on filters
- MINOR: filters: Set last_entity when a filter fails on stream_start callback
- MINOR: stream: Display the currently running filter per channel in stream dump
- DOC: config: Use the right alias for %B
- BUG/MINOR: channel: Increase the stconn bytes_in value in channel_add_input()
- BUG/MINOR: sample: Fix sample to retrieve the number of bytes received and sent
- BUG/MINOR: http-ana: Increment scf bytes_out value if an haproxy error is sent
- BUG/MAJOR: fcgi: Fix param decoding by properly checking its size
- BUG/MAJOR: resolvers: Properly lower the names found in DNS response
- BUG/MEDIUM: mux-fcgi: Use a safe loop to resume each stream eligible for sending
- MINOR: mux-fcgi: Use a dedicated function to resume streams eligible for sending
- CLEANUP: qpack: simplify length checks in qpack_decode_fs()
- MINOR: counters: Introduce COUNTERS_UPDATE_MAX()
- MINOR: listeners: Update the frequency counters separately when needed
- MINOR: proxies: Update beconn separately
- MINOR: stats: Add an option to disable the calculation of max counters
2026/02/19 : 3.4-dev5
- DOC: internals: add mworker V3 internals
- BUG/MINOR: threads: Initialize maxthrpertgroup earlier.
- BUG/MEDIUM: threads: Differ checking the max threads per group number
- BUG/MINOR: startup: fix allocation error message of progname string
- BUG/MINOR: startup: handle a possible strdup() failure
- MINOR: cfgparse: validate defaults proxies separately
- MINOR: cfgparse: move proxy post-init in a dedicated function
- MINOR: proxy: refactor proxy inheritance of a defaults section
- MINOR: proxy: refactor mode parsing
- MINOR: backend: add function to check support for dynamic servers
- MINOR: proxy: define "add backend" handler
- MINOR: proxy: parse mode on dynamic backend creation
- MINOR: proxy: parse guid on dynamic backend creation
- MINOR: proxy: check default proxy compatibility on "add backend"
- MEDIUM: proxy: implement dynamic backend creation
- MINOR: proxy: assign dynamic proxy ID
- REGTESTS: add dynamic backend creation test
- BUG/MINOR: proxy: fix clang build error on "add backend" handler
- BUG/MINOR: proxy: fix null dereference in "add backend" handler
- MINOR: net_helper: extend the ip.fp output with an option presence mask
- BUG/MINOR: proxy: fix default ALPN bind settings
- CLEANUP: lb-chash: free lb_nodes from chash's deinit(), not global
- BUG/MEDIUM: lb-chash: always properly initialize lb_nodes with dynamic servers
- CLEANUP: haproxy: fix bad line wrapping in run_poll_loop()
- MINOR: activity: support setting/clearing lock/memory watching for task profiling
- MEDIUM: activity: apply and use new finegrained task profiling settings
- MINOR: activity: allow to switch per-task lock/memory profiling at runtime
- MINOR: startup: Add the SSL lib verify directory in haproxy -vv
- BUG/MINOR: ssl: SSL_CERT_DIR environment variable doesn't affect haproxy
- CLEANUP: initcall: adjust comments to INITCALL{0,1} macros
- DOC: proxy-proto: underline the packed attribute for struct pp2_tlv_ssl
- MINOR: queues: Check minconn first in srv_dynamic_maxconn()
- MINOR: servers: Call process_srv_queue() without lock when possible
- BUG/MINOR: quic: ensure handshake speed up is only run once per conn
- BUG/MAJOR: quic: reject invalid token
- BUG/MAJOR: quic: fix parsing frame type
- MINOR: ssl: Missing '\n' in error message
- MINOR: jwt: Convert an RSA JWK into an EVP_PKEY
- MINOR: jwt: Add new jwt_decrypt_jwk converter
- REGTESTS: jwt: Add new "jwt_decrypt_jwk" tests
- MINOR: startup: Add HAVE_WORKING_TCP_MD5SIG in haproxy -vv
- MINOR: startup: sort the feature list in haproxy -vv
- MINOR: startup: show the list of detected features at runtime with haproxy -vv
- SCRIPTS: build-vtest: allow to set a TMPDIR and a DESTDIR
- MINOR: filters: rework RESUME_FILTER_* macros as inline functions
- MINOR: filters: rework filter iteration for channel related callback functions
- MEDIUM: filters: use per-channel filter list when relevant
- DEV: gdb: add a utility to find the post-mortem address from a core
- BUG/MINOR: deviceatlas: add missing return on error in config parsers
- BUG/MINOR: deviceatlas: add NULL checks on strdup() results in config parsers
- BUG/MEDIUM: deviceatlas: fix resource leaks on init error paths
- BUG/MINOR: deviceatlas: fix off-by-one in da_haproxy_conv()
- BUG/MINOR: deviceatlas: fix cookie vlen using wrong length after extraction
- BUG/MINOR: deviceatlas: fix double-checked locking race in checkinst
- BUG/MINOR: deviceatlas: fix resource leak on hot-reload compile failure
- BUG/MINOR: deviceatlas: fix deinit to only finalize when initialized
- BUG/MINOR: deviceatlas: set cache_size on hot-reloaded atlas instance
- MINOR: deviceatlas: check getproptype return and remove pprop indirection
- MINOR: deviceatlas: increase DA_MAX_HEADERS and header buffer sizes
- MINOR: deviceatlas: define header_evidence_entry in dummy library header
- MINOR: deviceatlas: precompute maxhdrlen to skip oversized headers early
- CLEANUP: deviceatlas: add unlikely hints and minor code tidying
- DEV: gdb: use unsigned longs to display pools memory usage
- BUG/MINOR: ssl: lack crtlist_dup_ssl_conf() declaration
- BUG/MINOR: ssl: double-free on error path w/ ssl-f-use parser
- BUG/MINOR: ssl: fix leak in ssl-f-use parser upon error
- BUG/MINOR: ssl: clarify ssl-f-use errors in post-section parsing
- BUG/MINOR: ssl: error with ssl-f-use when no "crt"
- MEDIUM: backend: make "balance random" consider tg local req rate when loads are equal
- BUG/MAJOR: Revert "MEDIUM: mux-quic: add BUG_ON if sending on locally closed QCS"
- BUG/MEDIUM: h3: reject frontend CONNECT as currently not implemented
- MINOR: mux-quic: add BUG_ON_STRESS() when draining data on closed stream
- REGTESTS: fix quoting in feature cmd which prevents test execution
- BUG/MEDIUM: mux-h2/quic: Stop sending via fast-forward if stream is closed
- BUG/MEDIUM: mux-h1: Stop sending vi fast-forward for unexpected states
- BUG/MEDIUM: applet: Fix test on shut flags for legacy applets (v2)
- DEV: term-events: Fix handshake events decoding
- BUG/MINOR: flt-trace: Properly compute length of the first DATA block
- MINOR: flt-trace: Add an option to limit the amount of data forwarded
- CLEANUP: compression: Remove unused static buffers
- BUG/MEDIUM: shctx: Use the next block when data exactly filled a block
- BUG/MINOR: http-ana: Stop to wait for body on client error/abort
- MINOR: stconn: Add missing SC_FL_NO_FASTFWD flag in sc_show_flags
- REORG: stconn: Move functions related to channel buffers to sc_strm.h
- BUG/MEDIUM: jwe: fix timing side-channel and dead code in JWE decryption
- MINOR: tree-wide: Use the buffer size instead of global setting when possible
- MINOR: buffers: Swap buffers of same size only
- BUG/MINOR: config: Check buffer pool creation for failures
- MEDIUM: cache: Don't rely on a chunk to store messages payload
- MEDIUM: stream: Limit number of synchronous send per stream wakeup
- MEDIUM: compression: Be sure to never compress more than a chunk at once
- MEDIUM: mux-h1/mux-h2/mux-fcgi/h3: Disable 0-copy for buffers of different size
- MEDIUM: applet: Disable 0-copy for buffers of different size
- MINOR: h1-htx: Disable 0-copy for buffers of different size
- MEDIUM: stream: Offer buffers of default size only
- BUG/MEDIUM: htx: Fix function used to change part of a block value when defrag
- MEDIUM: htx: Refactor transfer of htx blocks to merge DATA blocks if possible
- MEDIUM: htx: Refactor htx defragmentation to merge data blocks
- MEDIUM: htx: Improve detection of fragmented/unordered HTX messages
- MINOR: http-ana: Do a defrag on unaligned HTX message when waiting for payload
- MINOR: http-fetch: Use pointer to HTX DATA block when retrieving HTX body
- MEDIUM: dynbuf: Add a pool for large buffers with a configurable size
- MEDIUM: chunk: Add support for large chunks
- MEDIUM: stconn: Properly handle large buffers during a receive
- MEDIUM: sample: Get chunks with a size dependent on input data when necessary
- MEDIUM: http-fetch: Be able to use large chunks when necessary
- MINOR: htx: Get large chunk if necessary to perform a defrag
- MEDIUM: http-ana: Use a large buffer if necessary when waiting for body
- MINOR: dynbuf: Add helpers to know if a buffer is a default or a large buffer
- MINOR: config: reject configs using HTTP with large bufsize >= 256 MB
- CI: do not use ghcr.io for Quic Interop workflows
- BUG/MEDIUM: ssl: SSL backend sessions used after free
- CI: vtest: move the vtest2 URL to vinyl-cache.org
- CI: github: disable windows.yml by default on unofficials repo
- MEDIUM: Add connect/queue/tarpit timeouts to set-timeout
- CLEANUP: mux-h1: Remove unneeded null check
- DOC: remove openssl no-deprecated CI image
- BUG/MINOR: acme: fix X509_NAME leak when X509_set_issuer_name() fails
- BUG/MINOR: backend: check delay MUX before conn_prepare()
- OPTIM: backend: reduce contention when checking MUX init with ALPN
- DOC: configuration: add the ACME wiki page link
- MINOR: ssl/ckch: Move EVP_PKEY and cert code generation from acme
- MINOR: ssl/ckch: certificates generation from "load" "crt-store" directive
- MINOR: trace: add definitions for haterm streams
- MINOR: init: allow a fileless init mode
- MEDIUM: init: allow the redefinition of argv[] parsing function
- MINOR: stconn: stream instantiation from proxy callback
- MINOR: haterm: add haterm HTTP server
- MINOR: haterm: new "haterm" utility
- MINOR: haterm: increase thread-local pool size
- BUG/MEDIUM: stats-file: fix shm-stats-file recover when all process slots are full
- BUG/MINOR: stats-file: manipulate shm-stats-file heartbeat using unsigned int
- BUG/MEDIUM: stats-file: detect and fix inconsistent shared clock when resuming from shm-stats-file
- CI: github: only enable OS X on development branches
2026/02/04 : 3.4-dev4
- BUG/MEDIUM: hlua: fix invalid lua_pcall() usage in hlua_traceback()
- BUG/MINOR: hlua: consume error object if ignored after a failing lua_pcall()
- BUG/MINOR: promex: Detach promex from the server on error dump its metrics dump
- BUG/MEDIUM: mux-h1: Skip UNUSED htx block when formatting the start line
- BUG/MINOR: proto_tcp: Properly report support for HAVE_TCP_MD5SIG feature
- BUG/MINOR: config: check capture pool creations for failures
- BUG/MINOR: stick-tables: abort startup on stk_ctr pool creation failure
- MEDIUM: pools: better check for size rounding overflow on registration
- DOC: reg-tests: update VTest upstream link in the starting guide
- BUG/MINOR: ssl: Properly manage alloc failures in SSL passphrase callback
- BUG/MINOR: ssl: Encrypted keys could not be loaded when given alongside certificate
- MINOR: ssl: display libssl errors on private key loading
- BUG/MAJOR: applet: Don't call I/O handler if the applet was shut
- MINOR: ssl: allow to disable certificate compression
- BUG/MINOR: ssl: fix error message of tune.ssl.certificate-compression
- DOC: config: mention some possible TLS versions restrictions for kTLS
- OPTIM: server: move queueslength in server struct
- OPTIM: proxy: separate queues fields from served
- OPTIM: server: get rid of the last use of _ha_barrier_full()
- DOC: config: mention that idle connection sharing is per thread-group
- MEDIUM: h1: strictly verify quoting in chunk extensions
- BUG/MINOR: config/ssl: fix spelling of "expose-experimental-directives"
- BUG/MEDIUM: ssl: fix msg callbacks on QUIC connections
- MEDIUM: ssl: remove connection from msg callback args
- MEDIUM: ssl: porting to X509_STORE_get1_objects() for OpenSSL 4.0
- REGTESTS: ssl: make reg-tests compatible with OpenSSL 4.0
- DOC: internals: cleanup few typos in master-worker documentation
- BUG/MEDIUM: applet: Fix test on shut flags for legacy applets
- MINOR: quic: Fix build with USE_QUIC_OPENSSL_COMPAT
- MEDIUM: tcpcheck: add post-80 option for mysql-check to support MySQL 8.x
- BUG/MEDIUM: threads: Atomically set TH_FL_SLEEPING and clr FL_NOTIFIED
- BUG/MINOR: cpu-topo: count cores not cpus to distinguish core types
- DOC: config: mention the limitation on server id range for consistent hash
- MEDIUM: backend: make "balance random" consider req rate when loads are equal
- BUG/MINOR: config: Fix setting of alt_proto
2026/01/22 : 3.4-dev3
- BUILD: ssl: strchr definition changed in C23
- BUILD: tools: memchr definition changed in C23

View File

@ -60,7 +60,6 @@
# USE_OBSOLETE_LINKER : use when the linker fails to emit __start_init/__stop_init
# USE_THREAD_DUMP : use the more advanced thread state dump system. Automatic.
# USE_OT : enable the OpenTracing filter
# EXTRA_MAKE : space-separated list of external addons using a Makefile.inc
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
@ -644,7 +643,7 @@ ifneq ($(USE_OPENSSL:0=),)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
src/acme_resolvers.o src/ssl_trace.o src/jwe.o
src/ssl_trace.o src/jwe.o
endif
ifneq ($(USE_ENGINE:0=),)
@ -671,8 +670,7 @@ OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
src/quic_enc.o src/mux_quic_qstrm.o src/xprt_qstrm.o \
src/mpring.o
src/quic_enc.o
endif
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
@ -861,14 +859,9 @@ ifneq ($(USE_LINUX_CAP:0=),)
endif
ifneq ($(USE_OT:0=),)
$(call warning, The opentracing filter was deprecated in haproxy 3.3 and will be removed in 3.5.)
include addons/ot/Makefile
endif
ifneq ($(EXTRA_MAKE),)
include $(addsuffix /Makefile.inc,$(EXTRA_MAKE))
endif
# better keep this one close to the end, as several libs above may need it
ifneq ($(USE_DL:0=),)
DL_LDFLAGS = -ldl
@ -963,7 +956,6 @@ endif # obsolete targets
endif # TARGET
OBJS =
HATERM_OBJS =
ifneq ($(EXTRA_OBJS),)
OBJS += $(EXTRA_OBJS)
@ -1011,14 +1003,12 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
src/cfgparse-peers.o src/haterm.o
src/cfgparse-peers.o
ifneq ($(TRACE),)
OBJS += src/calltrace.o
endif
HATERM_OBJS += $(OBJS) src/haterm_init.o
# Used only for forced dependency checking. May be cleared during development.
INCLUDES = $(wildcard include/*/*.h)
DEP = $(INCLUDES) .build_opts
@ -1050,7 +1040,7 @@ IGNORE_OPTS=help install install-man install-doc install-bin \
uninstall clean tags cscope tar git-tar version update-version \
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
dev/term_events/term_events dev/gdb/pm-from-core dev/gdb/libs-from-core
dev/term_events/term_events
ifneq ($(TARGET),)
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
@ -1066,9 +1056,6 @@ endif # non-empty target
haproxy: $(OPTIONS_OBJS) $(OBJS)
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
haterm: $(OPTIONS_OBJS) $(HATERM_OBJS)
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
objsize: haproxy
$(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
@ -1084,12 +1071,6 @@ admin/dyncookie/dyncookie: admin/dyncookie/dyncookie.o
dev/flags/flags: dev/flags/flags.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
dev/gdb/libs-from-core: dev/gdb/libs-from-core.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
dev/gdb/pm-from-core: dev/gdb/pm-from-core.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
dev/haring/haring: dev/haring/haring.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
@ -1169,7 +1150,7 @@ uninstall:
$(Q)rm -f "$(DESTDIR)$(SBINDIR)"/haproxy
clean:
$(Q)rm -f *.[oas] src/*.[oas] haproxy haterm test .build_opts .build_opts.new
$(Q)rm -f *.[oas] src/*.[oas] haproxy test .build_opts .build_opts.new
$(Q)for dir in . src dev/* admin/* addons/* include/* doc; do rm -f $$dir/*~ $$dir/*.rej $$dir/core; done
$(Q)rm -f haproxy-$(VERSION).tar.gz haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION).tar.gz
$(Q)rm -f haproxy-$(VERSION) haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION) nohup.out gmon.out
@ -1188,7 +1169,7 @@ distclean: clean
$(Q)rm -f admin/dyncookie/dyncookie
$(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
$(Q)rm -f dev/qpack/decode dev/gdb/pm-from-core dev/gdb/libs-from-core
$(Q)rm -f dev/qpack/decode
tags:
$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
@ -1342,8 +1323,7 @@ range:
echo "[ $$index/$$count ] $$commit #############################"; \
git checkout -q $$commit || die 1; \
$(MAKE) all || die 1; \
set -- $(TEST_CMD); \
[ "$$#" -eq 0 ] || "$$@" || die 1; \
[ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
index=$$((index + 1)); \
done; \
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \

View File

@ -1,9 +1,10 @@
# HAProxy
[![alpine/musl](https://github.com/haproxy/haproxy/actions/workflows/musl.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
[![AWS-LC](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
[![openssl no-deprecated](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml)
[![Illumos](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
[![NetBSD](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
[![CrossCompile](https://github.com/haproxy/haproxy/actions/workflows/cross-zoo.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/cross-zoo.yml)
[![FreeBSD](https://api.cirrus-ci.com/github/haproxy/haproxy.svg?task=FreeBSD)](https://cirrus-ci.com/github/haproxy/haproxy/)
[![VTest](https://github.com/haproxy/haproxy/actions/workflows/vtest.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/vtest.yml)

View File

@ -1,2 +1,2 @@
$Format:%ci$
2026/04/29
2026/01/22

View File

@ -1 +1 @@
3.4-dev10
3.4-dev3

View File

@ -40,7 +40,8 @@
#include <stdlib.h>
#include <inttypes.h>
#include <stdbool.h>
typedef int bool;
enum { false, true };
typedef unsigned char byte;

View File

@ -31,7 +31,6 @@ static struct {
da_atlas_t atlas;
da_evidence_id_t useragentid;
da_severity_t loglevel;
size_t maxhdrlen;
char separator;
unsigned char daset:1;
} global_deviceatlas = {
@ -43,7 +42,6 @@ static struct {
.atlasmap = NULL,
.atlasfd = -1,
.useragentid = 0,
.maxhdrlen = 0,
.daset = 0,
.separator = '|',
};
@ -59,10 +57,6 @@ static int da_json_file(char **args, int section_type, struct proxy *curpx,
return -1;
}
global_deviceatlas.jsonpath = strdup(args[1]);
if (unlikely(global_deviceatlas.jsonpath == NULL)) {
memprintf(err, "deviceatlas json file : out of memory.\n");
return -1;
}
return 0;
}
@ -79,7 +73,6 @@ static int da_log_level(char **args, int section_type, struct proxy *curpx,
loglevel = atol(args[1]);
if (loglevel < 0 || loglevel > 3) {
memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
return -1;
} else {
global_deviceatlas.loglevel = (da_severity_t)loglevel;
}
@ -108,10 +101,6 @@ static int da_properties_cookie(char **args, int section_type, struct proxy *cur
return -1;
} else {
global_deviceatlas.cookiename = strdup(args[1]);
if (unlikely(global_deviceatlas.cookiename == NULL)) {
memprintf(err, "deviceatlas cookie name : out of memory.\n");
return -1;
}
}
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
return 0;
@ -130,7 +119,6 @@ static int da_cache_size(char **args, int section_type, struct proxy *curpx,
cachesize = atol(args[1]);
if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
return -1;
} else {
#ifdef APINOCACHE
fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
@ -177,7 +165,7 @@ static int init_deviceatlas(void)
da_status_t status;
jsonp = fopen(global_deviceatlas.jsonpath, "r");
if (unlikely(jsonp == 0)) {
if (jsonp == 0) {
ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
global_deviceatlas.jsonpath);
err_code |= ERR_ALERT | ERR_FATAL;
@ -189,11 +177,9 @@ static int init_deviceatlas(void)
status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
&global_deviceatlas.atlasimgptr, &atlasimglen);
fclose(jsonp);
if (unlikely(status != DA_OK)) {
if (status != DA_OK) {
ha_alert("deviceatlas : '%s' json file is invalid.\n",
global_deviceatlas.jsonpath);
free(global_deviceatlas.atlasimgptr);
da_fini();
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
@ -201,10 +187,8 @@ static int init_deviceatlas(void)
status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
global_deviceatlas.atlasimgptr, atlasimglen);
if (unlikely(status != DA_OK)) {
if (status != DA_OK) {
ha_alert("deviceatlas : data could not be compiled.\n");
free(global_deviceatlas.atlasimgptr);
da_fini();
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
@ -213,28 +197,11 @@ static int init_deviceatlas(void)
if (global_deviceatlas.cookiename == 0) {
global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
if (unlikely(global_deviceatlas.cookiename == NULL)) {
ha_alert("deviceatlas : out of memory.\n");
da_atlas_close(&global_deviceatlas.atlas);
free(global_deviceatlas.atlasimgptr);
da_fini();
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
}
global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
"user-agent");
{
size_t hi;
global_deviceatlas.maxhdrlen = 16;
for (hi = 0; hi < global_deviceatlas.atlas.header_evidence_count; hi++) {
size_t nl = strlen(global_deviceatlas.atlas.header_priorities[hi].name);
if (nl > global_deviceatlas.maxhdrlen)
global_deviceatlas.maxhdrlen = nl;
}
}
if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
if (global_deviceatlas.atlasmap == MAP_FAILED) {
@ -264,22 +231,24 @@ static void deinit_deviceatlas(void)
free(global_deviceatlas.cookiename);
da_atlas_close(&global_deviceatlas.atlas);
free(global_deviceatlas.atlasimgptr);
da_fini();
}
if (global_deviceatlas.atlasfd != -1) {
munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
close(global_deviceatlas.atlasfd);
shm_unlink(ATLASMAPNM);
}
da_fini();
}
static void da_haproxy_checkinst(void)
{
if (global_deviceatlas.atlasmap != 0) {
char *base;
base = (char *)global_deviceatlas.atlasmap;
char *base;
base = (char *)global_deviceatlas.atlasmap;
if (base[0] != 0) {
if (base[0] != 0) {
FILE *jsonp;
void *cnew;
da_status_t status;
@ -289,10 +258,6 @@ static void da_haproxy_checkinst(void)
da_property_decl_t extraprops[1] = {{NULL, 0}};
#ifdef USE_THREAD
HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
if (base[0] == 0) {
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
return;
}
#endif
strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
jsonp = fopen(atlasp, "r");
@ -310,20 +275,10 @@ static void da_haproxy_checkinst(void)
fclose(jsonp);
if (status == DA_OK) {
if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
inst.config.cache_size = global_deviceatlas.cachesize;
da_atlas_close(&global_deviceatlas.atlas);
free(global_deviceatlas.atlasimgptr);
global_deviceatlas.atlasimgptr = cnew;
global_deviceatlas.atlas = inst;
{
size_t hi;
global_deviceatlas.maxhdrlen = 16;
for (hi = 0; hi < inst.header_evidence_count; hi++) {
size_t nl = strlen(inst.header_priorities[hi].name);
if (nl > global_deviceatlas.maxhdrlen)
global_deviceatlas.maxhdrlen = nl;
}
}
base[0] = 0;
ha_notice("deviceatlas : new instance, data file date `%s`.\n",
da_getdatacreationiso8601(&global_deviceatlas.atlas));
@ -331,8 +286,6 @@ static void da_haproxy_checkinst(void)
ha_alert("deviceatlas : instance update failed.\n");
free(cnew);
}
} else {
free(cnew);
}
#ifdef USE_THREAD
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
@ -344,7 +297,7 @@ static void da_haproxy_checkinst(void)
static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
{
struct buffer *tmp;
da_propid_t prop;
da_propid_t prop, *pprop;
da_status_t status;
da_type_t proptype;
const char *propname;
@ -364,15 +317,13 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
continue;
}
if (unlikely(da_atlas_getproptype(&global_deviceatlas.atlas, prop, &proptype) != DA_OK)) {
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
continue;
}
pprop = &prop;
da_atlas_getproptype(&global_deviceatlas.atlas, *pprop, &proptype);
switch (proptype) {
case DA_TYPE_BOOLEAN: {
bool val;
status = da_getpropboolean(devinfo, prop, &val);
status = da_getpropboolean(devinfo, *pprop, &val);
if (status == DA_OK) {
chunk_appendf(tmp, "%d", val);
}
@ -381,7 +332,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
case DA_TYPE_INTEGER:
case DA_TYPE_NUMBER: {
long val;
status = da_getpropinteger(devinfo, prop, &val);
status = da_getpropinteger(devinfo, *pprop, &val);
if (status == DA_OK) {
chunk_appendf(tmp, "%ld", val);
}
@ -389,7 +340,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
}
case DA_TYPE_STRING: {
const char *val;
status = da_getpropstring(devinfo, prop, &val);
status = da_getpropstring(devinfo, *pprop, &val);
if (status == DA_OK) {
chunk_appendf(tmp, "%s", val);
}
@ -420,26 +371,29 @@ static int da_haproxy_conv(const struct arg *args, struct sample *smp, void *pri
{
da_deviceinfo_t devinfo;
da_status_t status;
char useragentbuf[1024];
const char *useragent;
char useragentbuf[1024] = { 0 };
int i;
if (unlikely(global_deviceatlas.daset == 0) || smp->data.u.str.data == 0) {
if (global_deviceatlas.daset == 0 || smp->data.u.str.data == 0) {
return 1;
}
da_haproxy_checkinst();
i = smp->data.u.str.data > sizeof(useragentbuf) - 1 ? sizeof(useragentbuf) - 1 : smp->data.u.str.data;
memcpy(useragentbuf, smp->data.u.str.area, i);
useragentbuf[i] = 0;
i = smp->data.u.str.data > sizeof(useragentbuf) ? sizeof(useragentbuf) : smp->data.u.str.data;
memcpy(useragentbuf, smp->data.u.str.area, i - 1);
useragentbuf[i - 1] = 0;
useragent = (const char *)useragentbuf;
status = da_search(&global_deviceatlas.atlas, &devinfo,
global_deviceatlas.useragentid, useragentbuf, 0);
global_deviceatlas.useragentid, useragent, 0);
return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
}
#define DA_MAX_HEADERS 32
#define DA_MAX_HEADERS 24
static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
@ -449,10 +403,10 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
struct channel *chn;
struct htx *htx;
struct htx_blk *blk;
char vbuf[DA_MAX_HEADERS][1024];
char vbuf[DA_MAX_HEADERS][1024] = {{ 0 }};
int i, nbh = 0;
if (unlikely(global_deviceatlas.daset == 0)) {
if (global_deviceatlas.daset == 0) {
return 0;
}
@ -460,17 +414,18 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
chn = (smp->strm ? &smp->strm->req : NULL);
htx = smp_prefetch_htx(smp, chn, NULL, 1);
if (unlikely(!htx))
if (!htx)
return 0;
i = 0;
for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
size_t vlen;
char *pval;
da_evidence_id_t evid;
enum htx_blk_type type;
struct ist n, v;
char hbuf[64];
char tval[1024];
char hbuf[24] = { 0 };
char tval[1024] = { 0 };
type = htx_get_blk_type(blk);
@ -483,18 +438,20 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
continue;
}
if (n.len > global_deviceatlas.maxhdrlen || n.len >= sizeof(hbuf)) {
/* The HTTP headers used by the DeviceAtlas API are not longer */
if (n.len >= sizeof(hbuf)) {
continue;
}
memcpy(hbuf, n.ptr, n.len);
hbuf[n.len] = 0;
pval = v.ptr;
vlen = v.len;
evid = -1;
i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
memcpy(tval, v.ptr, i);
tval[i] = 0;
pval = tval;
vlen = i;
if (strcasecmp(hbuf, "Accept-Language") == 0) {
evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
@ -512,7 +469,7 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
continue;
}
vlen = pl;
vlen -= global_deviceatlas.cookienamelen - 1;
pval = p;
evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
} else {

View File

@ -141,11 +141,6 @@ enum {
DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
};
struct header_evidence_entry {
const char *name;
da_evidence_id_t id;
};
struct da_config {
unsigned int cache_size;
unsigned int __reserved[15]; /* enough reserved keywords for future use */

View File

@ -70,4 +70,4 @@ OPTIONS_OBJS += \
addons/ot/src/vars.o
endif
OT_CFLAGS := $(OT_CFLAGS) $(OT_DEFINE)
OT_CFLAGS := $(OT_CFLAGS) -Iaddons/ot/include $(OT_DEFINE)

View File

@ -48,12 +48,13 @@ Currently, tracers that support this API include Datadog, Jaeger, LightStep
and Zipkin.
Note: The OpenTracing filter shouldn't be used for new designs as OpenTracing
itself is no longer maintained nor supported by its authors. As such
OpenTracing will be deprecated in 3.3 and removed in 3.5. A replacement
filter based on OpenTelemetry is available since 3.4 with complete build
instructions currently at:
itself is no longer maintained nor supported by its authors. A
replacement filter base on OpenTelemetry is currently under development
and is expected to be ready around HAProxy 3.2. As such OpenTracing will
be deprecated in 3.3 and removed in 3.5.
https://github.com/haproxytech/haproxy-opentelemetry/
The OT filter was primarily tested with the Jaeger tracer, while configurations
for both Datadog and Zipkin tracers were also set in the test directory.
The OT filter is a standard HAProxy filter, so what applies to others also
applies to this one (of course, by that I mean what is described in the

View File

@ -35,11 +35,11 @@
do { \
if (!(l) || (flt_ot_debug.level & (1 << (l)))) \
(void)fprintf(stderr, FLT_OT_DBG_FMT("%.*s" f "\n"), \
flt_ot_dbg_indent_level, FLT_OT_DBG_INDENT, ##__VA_ARGS__); \
dbg_indent_level, FLT_OT_DBG_INDENT, ##__VA_ARGS__); \
} while (0)
# define FLT_OT_FUNC(f, ...) do { FLT_OT_DBG(1, "%s(" f ") {", __func__, ##__VA_ARGS__); flt_ot_dbg_indent_level += 3; } while (0)
# define FLT_OT_RETURN(a) do { flt_ot_dbg_indent_level -= 3; FLT_OT_DBG(1, "}"); return a; } while (0)
# define FLT_OT_RETURN_EX(a,t,f) do { flt_ot_dbg_indent_level -= 3; { t _r = (a); FLT_OT_DBG(1, "} = " f, _r); return _r; } } while (0)
# define FLT_OT_FUNC(f, ...) do { FLT_OT_DBG(1, "%s(" f ") {", __func__, ##__VA_ARGS__); dbg_indent_level += 3; } while (0)
# define FLT_OT_RETURN(a) do { dbg_indent_level -= 3; FLT_OT_DBG(1, "}"); return a; } while (0)
# define FLT_OT_RETURN_EX(a,t,f) do { dbg_indent_level -= 3; { t _r = (a); FLT_OT_DBG(1, "} = " f, _r); return _r; } } while (0)
# define FLT_OT_RETURN_INT(a) FLT_OT_RETURN_EX((a), int, "%d")
# define FLT_OT_RETURN_PTR(a) FLT_OT_RETURN_EX((a), void *, "%p")
# define FLT_OT_DBG_IFDEF(a,b) a
@ -54,7 +54,7 @@ struct flt_ot_debug {
};
extern THREAD_LOCAL int flt_ot_dbg_indent_level;
extern THREAD_LOCAL int dbg_indent_level;
extern struct flt_ot_debug flt_ot_debug;
#else

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
/***

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
/***

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
#define FLT_OT_EVENT_DEF(a,b,c,d,e,f) { AN_##b##_##a, SMP_OPT_DIR_##b, SMP_VAL_FE_##c, SMP_VAL_BE_##d, e, f },

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
/*
@ -155,18 +155,12 @@ static void flt_ot_return_void(const struct filter *f, char **err)
*/
static int flt_ot_init(struct proxy *p, struct flt_conf *fconf)
{
static int warnings_emitted = 0;
struct flt_ot_conf *conf = FLT_OT_DEREF(fconf, conf, NULL);
char *err = NULL;
int retval = FLT_OT_RET_ERROR;
FLT_OT_FUNC("%p, %p", p, fconf);
if (!warnings_emitted && !deprecated_directives_allowed) {
warnings_emitted++;
ha_warning("The opentracing filter was deprecated in haproxy 3.3 and will be removed in 3.5.\n");
}
if (conf == NULL)
FLT_OT_RETURN_INT(retval);

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
#define FLT_OT_GROUP_DEF(a,b,c) { a, b, c },

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
#ifdef DEBUG_OT
@ -261,7 +261,7 @@ int flt_ot_http_header_set(struct channel *chn, const char *prefix, const char *
if (value == NULL) {
/* Do nothing. */
}
else if (http_add_header(htx, ist_name, ist(value), 1) == 1) {
else if (http_add_header(htx, ist_name, ist(value)) == 1) {
retval = 0;
FLT_OT_DBG(3, "HTTP header '%s: %s' added", ist_name.ptr, value);

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
static struct pool_head *pool_head_ot_span_context __read_mostly = NULL;

View File

@ -17,12 +17,12 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
#ifdef DEBUG_OT
struct flt_ot_debug flt_ot_debug;
THREAD_LOCAL int flt_ot_dbg_indent_level = 0;
THREAD_LOCAL int dbg_indent_level = 0;
#endif
#ifdef OTC_DBG_MEM
@ -359,7 +359,8 @@ static int flt_ot_parse_cfg_sample(const char *file, int linenum, char **args, s
*/
static int flt_ot_parse_cfg_str(const char *file, int linenum, char **args, struct list *head, char **err)
{
int i, retval = ERR_NONE;
struct flt_ot_conf_str *str = NULL;
int i, retval = ERR_NONE;
FLT_OT_FUNC("\"%s\", %d, %p, %p, %p:%p", file, linenum, args, head, FLT_OT_DPTR_ARGS(err));
@ -367,6 +368,9 @@ static int flt_ot_parse_cfg_str(const char *file, int linenum, char **args, stru
if (flt_ot_conf_str_init(args[i], linenum, head, err) == NULL)
retval |= ERR_ABORT | ERR_ALERT;
if (retval & ERR_CODE)
flt_ot_conf_str_free(&str);
FLT_OT_RETURN_INT(retval);
}
@ -640,7 +644,7 @@ static int flt_ot_parse_cfg_group(const char *file, int linenum, char **args, in
if (pdata->keyword == FLT_OT_PARSE_GROUP_ID) {
flt_ot_current_group = flt_ot_conf_group_init(args[1], linenum, &(flt_ot_current_config->groups), &err);
if (flt_ot_current_group == NULL)
if (flt_ot_current_config == NULL)
retval |= ERR_ABORT | ERR_ALERT;
}
else if (pdata->keyword == FLT_OT_PARSE_GROUP_SCOPES) {

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
/***

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
static struct pool_head *pool_head_ot_scope_span __read_mostly = NULL;

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
#ifdef DEBUG_OT
@ -41,7 +41,7 @@ void flt_ot_args_dump(char **args)
argc = flt_ot_args_count(args);
(void)fprintf(stderr, FLT_OT_DBG_FMT("%.*sargs[%d]: { '%s' "), flt_ot_dbg_indent_level, FLT_OT_DBG_INDENT, argc, args[0]);
(void)fprintf(stderr, FLT_OT_DBG_FMT("%.*sargs[%d]: { '%s' "), dbg_indent_level, FLT_OT_DBG_INDENT, argc, args[0]);
for (i = 1; i < argc; i++)
(void)fprintf(stderr, "'%s' ", args[i]);

View File

@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "../include/include.h"
#include "include.h"
#ifdef DEBUG_OT
@ -46,10 +46,10 @@ static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope)
vars_rdlock(vars);
for (i = 0; i < VAR_NAME_ROOTS; i++) {
struct ceb_node *node = cebu64_imm_first(&(vars->name_root[i]));
struct ceb_node *node = cebu64_first(&(vars->name_root[i]));
for ( ; node != NULL; node = cebu64_imm_next(&(vars->name_root[i]), node)) {
struct var *var = container_of(node, struct var, name_node);
for ( ; node != NULL; node = cebu64_next(&(vars->name_root[i]), node)) {
struct var *var = container_of(node, struct var, node);
FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
}

View File

@ -407,7 +407,6 @@ listed below. Metrics from extra counters are not listed.
+----------------------------------------------------+
| haproxy_sticktable_size |
| haproxy_sticktable_used |
| haproxy_sticktable_local_updates |
+----------------------------------------------------+
* Resolvers metrics

View File

@ -36,7 +36,6 @@
#include <haproxy/stats.h>
#include <haproxy/stconn.h>
#include <haproxy/stream.h>
#include <haproxy/stress.h>
#include <haproxy/task.h>
#include <haproxy/tools.h>
#include <haproxy/version.h>
@ -83,7 +82,6 @@ struct promex_ctx {
unsigned field_num; /* current field number (ST_I_PX_* etc) */
unsigned mod_field_num; /* first field number of the current module (ST_I_PX_* etc) */
int obj_state; /* current state among PROMEX_{FRONT|BACK|SRV|LI}_STATE_* */
struct watcher px_watch; /* watcher to automatically update next pointer */
struct watcher srv_watch; /* watcher to automatically update next pointer */
struct list modules; /* list of promex modules to export */
struct eb_root filters; /* list of filters to apply on metrics name */
@ -349,10 +347,6 @@ static int promex_dump_ts(struct appctx *appctx, struct ist prefix,
istcat(&n, prefix, PROMEX_MAX_NAME_LEN);
istcat(&n, name, PROMEX_MAX_NAME_LEN);
/* In stress mode, force yielding on each metric. */
if (STRESS_RUN1(istlen(*out), 0))
goto full;
if ((ctx->flags & PROMEX_FL_METRIC_HDR) &&
!promex_dump_ts_header(n, desc, type, out, max))
goto full;
@ -632,6 +626,8 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
}
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
void *counters;
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_FE))
continue;
@ -668,7 +664,8 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_FE))
goto next_px2;
if (!mod->fill_stats(mod, px->extra_counters_fe, stats + ctx->field_num, &ctx->mod_field_num))
counters = EXTRA_COUNTERS_GET(px->extra_counters_fe, mod);
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
return -1;
val = stats[ctx->field_num + ctx->mod_field_num];
@ -820,6 +817,8 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
}
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
void *counters;
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_LI))
continue;
@ -865,7 +864,8 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
labels[lb_idx+1].name = ist("mod");
labels[lb_idx+1].value = ist2(mod->name, strlen(mod->name));
if (!mod->fill_stats(mod, li->extra_counters, stats + ctx->field_num, &ctx->mod_field_num))
counters = EXTRA_COUNTERS_GET(li->extra_counters, mod);
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
return -1;
val = stats[ctx->field_num + ctx->mod_field_num];
@ -941,6 +941,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
if (promex_filter_metric(appctx, prefix, name))
continue;
if (!px)
px = proxies_list;
while (px) {
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
@ -1095,16 +1098,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
&val, labels, &out, max))
goto full;
next_px:
px = watcher_next(&ctx->px_watch, px->next);
px = px->next;
}
watcher_detach(&ctx->px_watch);
ctx->flags |= PROMEX_FL_METRIC_HDR;
/* Prepare a new iteration for the next stat column.
* Update ctx.p[0] via watcher.
*/
watcher_attach(&ctx->px_watch, proxies_list);
px = proxies_list;
}
/* Skip extra counters */
@ -1117,6 +1113,8 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
}
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
void *counters;
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_BE))
continue;
@ -1127,6 +1125,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
if (promex_filter_metric(appctx, prefix, name))
continue;
if (!px)
px = proxies_list;
while (px) {
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
struct promex_metric metric;
@ -1150,7 +1151,8 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
goto next_px2;
if (!mod->fill_stats(mod, px->extra_counters_be, stats + ctx->field_num, &ctx->mod_field_num))
counters = EXTRA_COUNTERS_GET(px->extra_counters_be, mod);
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
return -1;
val = stats[ctx->field_num + ctx->mod_field_num];
@ -1161,39 +1163,25 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
goto full;
next_px2:
px = watcher_next(&ctx->px_watch, px->next);
px = px->next;
}
watcher_detach(&ctx->px_watch);
ctx->flags |= PROMEX_FL_METRIC_HDR;
/* Prepare a new iteration for the next stat column.
* Update ctx.p[0] via watcher.
*/
watcher_attach(&ctx->px_watch, proxies_list);
px = proxies_list;
}
ctx->field_num += mod->stats_count;
ctx->mod_field_num = 0;
}
px = NULL;
mod = NULL;
end:
if (ret) {
watcher_detach(&ctx->px_watch);
mod = NULL;
}
if (out.len) {
if (!htx_add_data_atonce(htx, out)) {
watcher_detach(&ctx->px_watch);
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
}
}
/* Save pointers of the current context for dump resumption :
* 0=current proxy, 1=current stats module
* Note that p[0] is already automatically updated via px_watch.
*/
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
ctx->p[0] = px;
ctx->p[1] = mod;
return ret;
full:
@ -1235,6 +1223,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
if (promex_filter_metric(appctx, prefix, name))
continue;
if (!px)
px = proxies_list;
while (px) {
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
enum promex_mt_type type;
@ -1254,12 +1245,17 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
goto next_px;
if (!sv) {
watcher_attach(&ctx->srv_watch, px->srv);
sv = px->srv;
}
while (sv) {
labels[lb_idx].name = ist("server");
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
if (!stats_fill_sv_line(px, sv, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
goto error;
return -1;
if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
goto next_sv;
@ -1409,25 +1405,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
next_px:
watcher_detach(&ctx->srv_watch);
px = watcher_next(&ctx->px_watch, px->next);
if (px) {
/* Update ctx.p[1] via watcher. */
watcher_attach(&ctx->srv_watch, px->srv);
sv = ctx->p[1];
}
px = px->next;
}
watcher_detach(&ctx->px_watch);
ctx->flags |= PROMEX_FL_METRIC_HDR;
/* Prepare a new iteration for the next stat column.
* Update ctx.p[0]/p[1] via px_watch/srv_watch.
*/
watcher_attach(&ctx->px_watch, proxies_list);
px = proxies_list;
if (likely(px)) {
watcher_attach(&ctx->srv_watch, px->srv);
sv = ctx->p[1];
}
}
/* Skip extra counters */
@ -1440,6 +1420,8 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
}
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
void *counters;
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_SRV))
continue;
@ -1450,6 +1432,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
if (promex_filter_metric(appctx, prefix, name))
continue;
if (!px)
px = proxies_list;
while (px) {
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
struct promex_metric metric;
@ -1470,6 +1455,11 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
goto next_px2;
if (!sv) {
watcher_attach(&ctx->srv_watch, px->srv);
sv = px->srv;
}
while (sv) {
labels[lb_idx].name = ist("server");
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
@ -1481,8 +1471,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
goto next_sv2;
if (!mod->fill_stats(mod, sv->extra_counters, stats + ctx->field_num, &ctx->mod_field_num))
goto error;
counters = EXTRA_COUNTERS_GET(sv->extra_counters, mod);
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
return -1;
val = stats[ctx->field_num + ctx->mod_field_num];
metric.type = ((val.type == FN_GAUGE) ? PROMEX_MT_GAUGE : PROMEX_MT_COUNTER);
@ -1497,57 +1488,33 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
next_px2:
watcher_detach(&ctx->srv_watch);
px = watcher_next(&ctx->px_watch, px->next);
if (px) {
/* Update ctx.p[1] via watcher. */
watcher_attach(&ctx->srv_watch, px->srv);
sv = ctx->p[1];
}
px = px->next;
}
watcher_detach(&ctx->px_watch);
ctx->flags |= PROMEX_FL_METRIC_HDR;
/* Prepare a new iteration for the next stat column.
* Update ctx.p[0]/p[1] via px_watch/srv_watch.
*/
watcher_attach(&ctx->px_watch, proxies_list);
px = proxies_list;
if (likely(px)) {
watcher_attach(&ctx->srv_watch, px->srv);
sv = ctx->p[1];
}
}
ctx->field_num += mod->stats_count;
ctx->mod_field_num = 0;
}
end:
if (ret) {
watcher_detach(&ctx->px_watch);
watcher_detach(&ctx->srv_watch);
mod = NULL;
}
px = NULL;
sv = NULL;
mod = NULL;
end:
if (out.len) {
if (!htx_add_data_atonce(htx, out))
return -1; /* Unexpected and unrecoverable error */
}
/* Save pointers of the current context for dump resumption :
* 0=current proxy, 1=current server, 2=current stats module
* Note that p[0]/p[1] are already automatically updated via px_watch/srv_watch.
*/
/* Save pointers (0=current proxy, 1=current server, 2=current stats module) of the current context */
ctx->p[0] = px;
ctx->p[1] = sv;
ctx->p[2] = mod;
return ret;
full:
ret = 0;
goto end;
error:
watcher_detach(&ctx->px_watch);
watcher_detach(&ctx->srv_watch);
return -1;
}
/* Dump metrics of module <mod>. It returns 1 on success, 0 if <out> is full and
@ -1768,11 +1735,6 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
ctx->field_num = ST_I_PX_PXNAME;
ctx->mod_field_num = 0;
appctx->st1 = PROMEX_DUMPER_BACK;
if (ctx->flags & PROMEX_FL_SCOPE_BACK) {
/* Update ctx.p[0] via watcher. */
watcher_attach(&ctx->px_watch, proxies_list);
}
__fallthrough;
case PROMEX_DUMPER_BACK:
@ -1790,15 +1752,6 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
ctx->field_num = ST_I_PX_PXNAME;
ctx->mod_field_num = 0;
appctx->st1 = PROMEX_DUMPER_SRV;
if (ctx->flags & PROMEX_FL_SCOPE_SERVER) {
/* Update ctx.p[0] via watcher. */
watcher_attach(&ctx->px_watch, proxies_list);
if (likely(proxies_list)) {
/* Update ctx.p[1] via watcher. */
watcher_attach(&ctx->srv_watch, proxies_list->srv);
}
}
__fallthrough;
case PROMEX_DUMPER_SRV:
@ -2076,7 +2029,6 @@ static int promex_appctx_init(struct appctx *appctx)
LIST_INIT(&ctx->modules);
ctx->filters = EB_ROOT;
appctx->st0 = PROMEX_ST_INIT;
watcher_init(&ctx->px_watch, &ctx->p[0], offsetof(struct proxy, watcher_list));
watcher_init(&ctx->srv_watch, &ctx->p[1], offsetof(struct server, watcher_list));
return 0;
}
@ -2091,11 +2043,6 @@ static void promex_appctx_release(struct appctx *appctx)
struct promex_metric_filter *flt;
struct eb32_node *node, *next;
if (appctx->st1 == PROMEX_DUMPER_BACK ||
appctx->st1 == PROMEX_DUMPER_SRV) {
watcher_detach(&ctx->px_watch);
}
if (appctx->st1 == PROMEX_DUMPER_SRV)
watcher_detach(&ctx->srv_watch);

View File

@ -149,7 +149,7 @@ usage() {
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
echo " -s, --socket <path> Use the stats socket at <path>"
echo " -p, --path <path> Specify a base path for relative files (default: ${BASEPATH})"
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
echo " -n, --dry-run Read certificates on the socket but don't dump them"
echo " -d, --debug Debug mode, set -x"
echo " -v, --verbose Verbose mode"

View File

@ -1,10 +1,11 @@
#!/bin/sh
#!/bin/bash
set -e
export VERBOSE=1
export TIMEOUT=90
export MASTER_SOCKET="${MASTER_SOCKET:-/var/run/haproxy-master.sock}"
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
export RET=
alert() {
if [ "$VERBOSE" -ge "1" ]; then
@ -14,38 +15,32 @@ alert() {
reload() {
if [ -S "$MASTER_SOCKET" ]; then
socat_addr="UNIX-CONNECT:${MASTER_SOCKET}"
else
case "$MASTER_SOCKET" in
*:[0-9]*)
socat_addr="TCP:${MASTER_SOCKET}"
;;
*)
alert "Invalid master socket address '${MASTER_SOCKET}': expected a UNIX socket file or <host>:<port>"
return 1
;;
esac
fi
while read -r line; do
echo "reload" | socat -t"${TIMEOUT}" "$socat_addr" - | {
read -r status || { alert "No status received (connection error or timeout after ${TIMEOUT}s)."; exit 1; }
case "$status" in
"Success=1") ret=0 ;;
"Success=0") ret=1 ;;
*) alert "Unexpected response: '$status'"; exit 1 ;;
esac
read -r _ # consume "--"
if [ "$VERBOSE" -ge 3 ] || { [ "$ret" = 1 ] && [ "$VERBOSE" -ge 2 ]; }; then
cat >&2
if [ "$line" = "Success=0" ]; then
RET=1
elif [ "$line" = "Success=1" ]; then
RET=0
elif [ "$line" = "Another reload is still in progress." ]; then
alert "$line"
elif [ "$line" = "--" ]; then
continue;
else
cat >/dev/null
if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
echo "$line" >&2
elif [ "$VERBOSE" = "3" ]; then
echo "$line" >&2
fi
fi
exit "$ret"
}
done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
if [ -z "$RET" ]; then
alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
return 1
fi
return "$RET"
}
usage() {
@ -57,12 +52,12 @@ usage() {
echo " EXPERIMENTAL script!"
echo ""
echo "Options:"
echo " -S, --master-socket <addr> Unix socket path or <host>:<port> (default: ${MASTER_SOCKET})"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
echo " -d, --debug Debug mode, set -x"
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
echo " -s, --silent Silent mode (no output)"
echo " -v, --verbose Verbose output (output from haproxy on failure)"
echo " -vv --verbose=all Very verbose output (output from haproxy on success and failure)"
echo " -vv Even more verbose output (output from haproxy on success and failure)"
echo " -h, --help This help"
echo ""
echo "Examples:"
@ -89,7 +84,7 @@ main() {
VERBOSE=2
shift
;;
-vv|--verbose=all)
-vv|--verbose)
VERBOSE=3
shift
;;

View File

@ -1,162 +0,0 @@
/*
* Extracts the libs archives from a core dump
*
* Copyright (C) 2026 Willy Tarreau <w@1wt.eu>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/* Note: builds with no option under glibc, and can be built as a minimal
* uploadable static executable using nolibc as well:
gcc -o libs-from-core -nostdinc -nostdlib -s -Os -static -fno-ident \
-fno-exceptions -fno-asynchronous-unwind-tables -fno-unwind-tables \
-Wl,--gc-sections,--orphan-handling=discard,-znoseparate-code \
-I /path/to/nolibc-sysroot/include libs-from-core.c
*/
#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/stat.h>
#include <elf.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* Print the command-line help for this tool on stderr.
 * Only the basename of <progname> is shown in the usage line.
 */
void usage(const char *progname)
{
	const char *base = strrchr(progname, '/');

	base = base ? base + 1 : progname;

	fprintf(stderr,
	        "Usage: %s [-q] <core_file>\n"
	        "Locate a libs archive from an haproxy core dump and dump it to stdout.\n"
	        "Arguments:\n"
	        "  -q         Query mode: only report offset and length, do not dump\n"
	        "  core_file  Core dump produced by haproxy\n",
	        base);
}
/*
 * Scan a core dump for the embedded "libs" tar archive and either dump it to
 * stdout or, with -q, only report its offset and length. Returns 0 on
 * success, 1 on error or when no archive is found.
 */
int main(int argc, char **argv)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	struct stat st;
	uint8_t *mem;
	int i, fd;
	const char *fname;
	int quiet = 0;
	int arg;

	/* parse leading options; "--" terminates option processing */
	for (arg = 1; arg < argc; arg++) {
		if (*argv[arg] != '-')
			break;
		if (strcmp(argv[arg], "-q") == 0)
			quiet = 1;
		else if (strcmp(argv[arg], "--") == 0) {
			arg++;
			break;
		}
		else {
			/* reject unknown options instead of silently ignoring
			 * them: a typo like "-Q" would otherwise dump raw
			 * binary to the terminal.
			 */
			usage(argv[0]);
			exit(1);
		}
	}

	if (arg < argc) {
		fname = argv[arg];
	} else {
		usage(argv[0]);
		exit(1);
	}

	fd = open(fname, O_RDONLY);
	if (fd < 0) {
		/* previously unchecked: fd=-1 was passed on to fstat/mmap */
		perror(fname);
		exit(1);
	}

	/* Let's just map the core dump as an ELF header */
	if (fstat(fd, &st) < 0) {
		perror("fstat()");
		exit(1);
	}

	mem = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mem == MAP_FAILED) {
		perror("mmap()");
		exit(1);
	}

	/* get the program headers */
	ehdr = (Elf64_Ehdr *)mem;

	/* check that it's really a core. Should be "\x7fELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
		fprintf(stderr, "ELF magic not found.\n");
		exit(1);
	}

	if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
		fprintf(stderr, "Only 64-bit ELF supported.\n");
		exit(1);
	}

	if (ehdr->e_type != ET_CORE) {
		fprintf(stderr, "ELF type %d, not a core dump.\n", ehdr->e_type);
		exit(1);
	}

	/* OK we can safely go with program headers */
	phdr = (Elf64_Phdr *)(mem + ehdr->e_phoff);

	for (i = 0; i < ehdr->e_phnum; i++) {
		uint64_t size = phdr[i].p_filesz;
		uint64_t offset = phdr[i].p_offset;
		int ret = 0;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (!size)
			continue;

		if (size < 512) // minimum for a tar header
			continue;

		/* tar magic */
		if (memcmp(mem + offset + 257, "ustar\0""00", 8) != 0)
			continue;

		/* uid, gid */
		if (memcmp(mem + offset + 108, "0000000\0""0000000\0", 16) != 0)
			continue;

		/* link name */
		if (memcmp(mem + offset + 157, "haproxy-libs-dump\0", 18) != 0)
			continue;

		/* OK that's really it */
		if (quiet)
			printf("offset=%#lx size=%#lx\n", offset, size);
		else
			ret = (write(1, mem + offset, size) == size) ? 0 : 1;
		return ret;
	}

	fprintf(stderr, "libs archive not found. Was 'set-dumpable' set to 'libs' ?\n");
	return 1;
}

View File

@ -1,141 +0,0 @@
/*
* Find the post-mortem offset from a core dump
*
* Copyright (C) 2026 Willy Tarreau <w@1wt.eu>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/* Note: builds with no option under glibc, and can be built as a minimal
* uploadable static executable using nolibc as well:
gcc -o pm-from-core -nostdinc -nostdlib -s -Os -static -fno-ident \
-fno-exceptions -fno-asynchronous-unwind-tables -fno-unwind-tables \
-Wl,--gc-sections,--orphan-handling=discard,-znoseparate-code \
-I /path/to/nolibc-sysroot/include pm-from-core.c
*/
#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/stat.h>
#include <elf.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(__GLIBC__)
# define my_memmem memmem
#else
/* Minimal memmem() replacement for libcs that lack it: return a pointer to
 * the first occurrence of the <needlelen> bytes at <needle> within the
 * <haystacklen> bytes at <haystack>, or NULL if not found. An empty needle
 * matches at the start of the haystack. Naive O(n*m) scan, which is fine for
 * the small segments searched by this tool.
 */
void *my_memmem(const void *haystack, size_t haystacklen,
                const void *needle, size_t needlelen)
{
	/* arithmetic on "const void *" is a GCC extension, not standard C;
	 * walk with an unsigned char pointer instead.
	 */
	const unsigned char *pos = haystack;

	while (haystacklen >= needlelen) {
		if (!memcmp(pos, needle, needlelen))
			return (void *)pos;
		pos++;
		haystacklen--;
	}
	return NULL;
}
#endif
#define MAGIC "POST-MORTEM STARTS HERE+7654321\0"
/*
 * Scan a core dump for the post-mortem magic string and print its core file
 * offset and runtime virtual address, along with the gdb command making use
 * of it. Returns 0 when the magic is found, 1 otherwise.
 */
int main(int argc, char **argv)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	struct stat st;
	uint8_t *mem;
	int i, fd;

	if (argc < 2) {
		/* usage on argument error belongs on stderr, not stdout */
		fprintf(stderr, "Usage: %s <core_file>\n", argv[0]);
		exit(1);
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		/* previously unchecked: fd=-1 was passed on to fstat/mmap */
		perror(argv[1]);
		exit(1);
	}

	/* Let's just map the core dump as an ELF header */
	if (fstat(fd, &st) < 0) {
		perror("fstat()");
		exit(1);
	}

	mem = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mem == MAP_FAILED) {
		perror("mmap()");
		exit(1);
	}

	/* get the program headers */
	ehdr = (Elf64_Ehdr *)mem;

	/* check that it's really a core. Should be "\x7fELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
		fprintf(stderr, "ELF magic not found.\n");
		exit(1);
	}

	if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
		fprintf(stderr, "Only 64-bit ELF supported.\n");
		exit(1);
	}

	if (ehdr->e_type != ET_CORE) {
		fprintf(stderr, "ELF type %d, not a core dump.\n", ehdr->e_type);
		exit(1);
	}

	/* OK we can safely go with program headers */
	phdr = (Elf64_Phdr *)(mem + ehdr->e_phoff);

	for (i = 0; i < ehdr->e_phnum; i++) {
		uint64_t size = phdr[i].p_filesz;
		uint64_t offset = phdr[i].p_offset;
		uint64_t vaddr = phdr[i].p_vaddr;
		uint64_t found_ofs;
		uint8_t *found;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (!size)
			continue;

		if (size >= 1048576) // don't scan large segments
			continue;

		found = my_memmem(mem + offset, size, MAGIC, sizeof(MAGIC) - 1);
		if (!found)
			continue;

		found_ofs = found - (mem + offset);
		printf("Found post-mortem magic in segment %d:\n", i);
		printf("  Core File Offset: 0x%lx (0x%lx + 0x%lx)\n", offset + found_ofs, offset, found_ofs);
		printf("  Runtime VAddr:    0x%lx (0x%lx + 0x%lx)\n", vaddr + found_ofs, vaddr, found_ofs);
		printf("  Segment Size:     0x%lx\n", size);
		printf("\nIn gdb, copy-paste this line:\n\n    pm_init 0x%lx\n\n", vaddr + found_ofs);
		return 0;
	}

	printf("post-mortem magic not found\n");
	return 1;
}

View File

@ -14,8 +14,8 @@ define pools_dump
set $idx=$idx + 1
end
set $mem = (unsigned long)$total * $e->size
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%lu\n", $p, $e, $e->name, $e->size, $total, $used, $mem
set $mem = $total * $e->size
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%u\n", $p, $e, $e->name, $e->size, $total, $used, $mem
set $p = *(void **)$p
end
end

View File

@ -86,7 +86,7 @@ maintenance model and what the user wants is passed, then the LLM is invited to
provide its opinion on the need for a backport and an explanation of the reason
for its choice. This often helps the user to find a quick summary about the
patch. All these outputs are then converted to a long HTML page with colors and
radio buttons, where patches are preselected based on this classification,
radio buttons, where patches are pre-selected based on this classification,
that the user can consult and adjust, read the commits if needed, and the
selected patches finally provide some copy-pastable commands in a text-area to
select commit IDs to work on, typically in a form that's suitable for a simple

View File

@ -30,10 +30,10 @@ static const char *tevt_fd_types[16] = {
};
static const char *tevt_hs_types[16] = {
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
[ 4] = "snd_err", [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_xprt_types[16] = {

File diff suppressed because it is too large Load Diff

View File

@ -1,140 +0,0 @@
------
HATerm
------
HAProxy's dummy HTTP
server for benchmarks
1. Background
-------------
HATerm is a dummy HTTP server that leverages the flexible and scalable
architecture of HAProxy to ease benchmarking of HTTP agents in all versions of
HTTP currently supported by HAProxy (HTTP/1, HTTP/2, HTTP/3), and both in clear
and TLS / QUIC. It follows the same principle as its ancestor HTTPTerm [1],
consisting in producing HTTP responses entirely configured by the request
parameters (size, response time, status etc). It also preserves the spirit
HTTPTerm which does not require any configuration beyond an optional listening
address and a port number, though it also supports advanced configurations with
the full spectrum of HAProxy features for specific testing. The goal remains
to make it almost as fast as the original HTTPTerm so that it can become a
de-facto replacement, with a compatible command line and request parameters
that will not change users' habits.
[1] https://github.com/wtarreau/httpterm
2. Compilation
--------------
HATerm may be compiled in the same way as HAProxy but with "haterm" as Makefile
target to provide on the "make" command line as follows:
$ make -j $(nproc) TARGET=linux-glibc haterm
HATerm supports HTTPS/SSL/TCP:
$ make TARGET=linux-glibc USE_OPENSSL=1
It also supports QUIC:
$ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 haterm
Technically speaking, it uses the regular HAProxy source and object code with a
different command line parser. As such, all build options supported by HAProxy
also apply to HATerm. See INSTALL for more details about how to compile them.
3. Execution
------------
HATerm is a very easy to use HTTP server with support for all the HTTP
versions. It displays its usage when run without argument or wrong arguments:
$ ./haterm
Usage : haterm -L [<ip>]:<clear port>[:<TCP&QUIC SSL port>] [-L...]* [opts]
where <opts> may be any combination of:
-G <line> : multiple option; append <line> to the "global" section
-F <line> : multiple option; append <line> to the "frontend" section
-T <line> : multiple option; append <line> to the "traces" section
-C : dump the configuration and exit
-D : goes daemon
-b <keysize> : RSA key size in bits (ex: "2048", "4096"...)
-c <curves> : ECDSA curves (ex: "P-256", "P-384"...)
-v : shows version
-d : enable the traces for all http protocols
--quic-bind-opts <opts> : append options to QUIC "bind" lines
--tcp-bind-opts <opts> : append options to TCP "bind" lines
Arguments -G, -F, -T permit to append one or multiple lines at the end of their
respective sections. A tab character ('\t') is prepended at the beginning of
the argument, and a line feed ('\n') is appended at the end. It is also
possible to insert multiple lines at once using escape sequences '\n' and '\t'
inside the string argument.
As HAProxy, HATerm may listen on several TCP/UDP addresses which can be
provided by multiple "-L" options. To be functional, it needs at least one
correct "-L" option to be set.
Examples:
$ ./haterm -L 127.0.0.1:8888 # listen on 127.0.0.1:8888 TCP address
$ ./haterm -L 127.0.0.1:8888:8889 # listen on 127.0.0.1:8888 TCP address,
# 127.0.0.1:8889 SSL/TCP address,
# and 127.0.0.1:8889 QUIC/UDP address
$ ./haterm -L 127.0.0.1:8888:8889 -L [::1]:8888:8889
With USE_QUIC_OPENSSL_COMPAT support, the user must configure a global
section as for HAProxy. HATerm sets its configuration internally in
memory, as is done by HAProxy from configuration files:
$ ./haterm -L 127.0.0.1:8888:8889
[NOTICE] (1371578) : haproxy version is 3.4-dev4-ba5eab-28
[NOTICE] (1371578) : path to executable is ./haterm
[ALERT] (1371578) : Binding [haterm cfgfile:12] for frontend
___haterm_frontend___: this SSL library does not
support the QUIC protocol. A limited compatibility
layer may be enabled using the "limited-quic" global
option if desired.
Such an alert may be fixed with "-G' option:
$ ./haterm -L 127.0.0.1:8888:8889 -G "limited-quic"
When the SSL support is not compiled in, the second port is ignored. This is
also the case for the QUIC support.
HATerm adjusts its responses depending on the requests it receives. An empty
query string provides the information about how the URIs are understood by
HATerm:
$ curl http://127.0.0.1:8888/?
HAProxy's dummy HTTP server for benchmarks - version 3.4-dev4.
All integer argument values are in the form [digits]*[kmgr] (r=random(0..1))
The following arguments are supported to override the default objects :
- /?s=<size> return <size> bytes.
E.g. /?s=20k
- /?r=<retcode> present <retcode> as the HTTP return code.
E.g. /?r=404
- /?c=<cache> set the return as not cacheable if <1.
E.g. /?c=0
- /?A=<req-after> drain the request body after sending the response.
E.g. /?A=1
- /?C=<close> force the response to use close if >0.
E.g. /?C=1
- /?K=<keep-alive> force the response to use keep-alive if >0.
E.g. /?K=1
- /?t=<time> wait <time> milliseconds before responding.
E.g. /?t=500
- /?k=<enable> Enable transfer encoding chunked with only one chunk
if >0.
- /?R=<enable> Enable sending random data if >0.
Note that those arguments may be cumulated on one line separated by a set of
delimiters among [&?,;/] :
- GET /?s=20k&c=1&t=700&K=30r HTTP/1.0
- GET /?r=500?s=0?c=0?t=1000 HTTP/1.0

View File

@ -1,5 +1,5 @@
-----------------------------------------
Filters Guide - version 3.4
Filters Guide - version 2.9
( Last update: 2021-02-24 )
------------------------------------------
Author : Christopher Faulet
@ -738,10 +738,10 @@ For instance :
switch (an_bit) {
case AN_REQ_WAIT_HTTP:
if (/* A test on received headers before any other treatment */) {
msg = ((chn->flags & CF_ISRESP) ? &s->txn.http->rsp : &s->txn.http->req);
msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
txn->status = 400;
msg->msg_state = HTTP_MSG_ERROR;
http_reply_and_close(s, s->txn.http->status, http_error_message(s));
http_reply_and_close(s, s->txn->status, http_error_message(s));
return -1; /* This is an error ! */
}
break;
@ -1161,7 +1161,7 @@ Then, to finish, there are 2 informational callbacks :
if we're retrying to send the request to the server after it failed. It
could be useful to reset the filter context before receiving the true
response.
By checking s->txn.http->status, it is possible to know why this callback is
By checking s->txn->status, it is possible to know why this callback is
called. If it's a 1xx, we're called because of an informational
message. Otherwise, it is a L7 retry.

View File

@ -539,22 +539,10 @@ message. These functions are used by HTX analyzers or by multiplexers.
with the first block not removed, or NULL if everything was removed, and
the amount of data drained.
- htx_xfer() transfers HTX blocks from an HTX message to another, stopping
when a specific amount of bytes, including meta-data, was copied. If the
tail block is a DATA block, it may be partially copied. All other block
are transferred at once. By default, copied blocks are removed from the
original HTX message and headers and trailers parts cannot be partially
copied. But flags can be set to change the default behavior:
- HTX_XFER_KEEP_SRC_BLKS: source blocks are not removed
- HTX_XFER_PARTIAL_HDRS_COPY: partial headers and trailers
part can be xferred
- HTX_XFER_HDRS_ONLY: Only the headers part is xferred
- htx_xfer_blks() [DEPRECATED] transfers HTX blocks from an HTX message to
another, stopping after the first block of a specified type is transferred
or when a specific amount of bytes, including meta-data, was moved. If the
tail block is a DATA block, it may be partially moved. All other block are
- htx_xfer_blks() transfers HTX blocks from an HTX message to another,
stopping after the first block of a specified type is transferred or when
a specific amount of bytes, including meta-data, was moved. If the tail
block is a DATA block, it may be partially moved. All other block are
transferred at once or kept. This function returns a mixed value, with the
last block moved, or NULL if nothing was moved, and the amount of data
transferred. When HEADERS or TRAILERS blocks must be transferred, this

View File

@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
At this time HAProxy still had a multi-process model, and the way haproxy is
working was incompatible with the daemon mode.
Systemd is compatible with traditional forking services, but somehow HAProxy
Systemd is compatible with traditionnal forking services, but somehow HAProxy
is different. To work correctly, systemd needs a main PID, this is the PID of
the process that systemd will supervises.
@ -45,7 +45,7 @@ However the wrapper suffered from several problems:
### mworker V1
HAProxy 1.8 got rid of the wrapper which was replaced by the master worker
HAProxy 1.8 got ride of the wrapper which was replaced by the master worker
mode. This first version was basically a reintegration of the wrapper features
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
then fork. In mworker mode, the master is usually launched as a root process,
@ -86,7 +86,7 @@ retrieved automatically.
The master is supervising the workers, when a current worker (not a previous one
from before the reload) is exiting without being asked for a reload, the master
will emit an "exit-on-failure" error and will kill every workers with a SIGTERM
and exits with the same error code than the failed worker, this behavior can be
and exits with the same error code than the failed master, this behavior can be
changed by using the "no exit-on-failure" option in the global section.
While the master is supervising the workers using the wait() function, the
@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
in the configuration is less useful and everything can be done from the master
CLI.
With 2.7, the reload mechanism of the master CLI evolved, with previous versions,
this mechanism was asynchronous, so once the `reload` command was received, the
With 2.7, the reload mecanism of the master CLI evolved, with previous versions,
this mecanism was asynchronous, so once the `reload` command was received, the
master would reload, the active master CLI connection was closed, and there was
no way to return a status as a response to the `reload` command. To achieve a
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
@ -208,38 +208,3 @@ starts with -st to achieve a hard stop on the previous worker.
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
events of xz/openssh, the function is now implemented directly in haproxy in
src/systemd.c.
### mworker V3
This version was implemented with HAProxy 3.1, the goal was to stop parsing and
applying the configuration in the master process.
One of the caveats of the previous implementation was that the parser could take
a lot of time, and the master process would be stuck in the parser instead of
handling its polling loop, signals etc. Some parts of the configuration parsing
could also be less reliable with third-party code (EXTRA_OBJS), it could, for
example, allow opening FDs and not closing them before the reload which
would crash the master after a few reloads.
The startup of the master-worker was reorganized this way:
- the "discovery" mode, which is a lighter configuration parsing step, only
applies the configuration which needs to be effective for the master process.
For example, "master-worker", "mworker-max-reloads" and less than 20 other
keywords that are identified by KWF_DISCOVERY in the code. It is really fast
as it doesn't need all the configuration to be applied in the master process.
- the master will then fork a worker, with a PROC_O_INIT flag. This worker has
a temporary sockpair connected to the master CLI. Once the worker is forked,
the master initializes its configuration and starts its polling loop.
- The newly forked worker will try to parse the configuration, which could
result in a failure (exit 1), or any bad error code. In case of success, the
worker will send a "READY" message to the master CLI then close this FD. At
this step everything was initialized and the worker can enter its polling
loop.
- The master then waits for the worker, it could:
* receive the READY message over the mCLI, resulting in a successful loading
of haproxy
* receive a SIGCHLD, meaning the worker exited and couldn't load

View File

@ -114,7 +114,7 @@ SHUT RDY ACT
1 1 1 => shut pending
PB: we can land into final shut if one thread disables the FD while another
one that was waiting on it reports it as shut. Theoretically it should be
one that was waiting on it reports it as shut. Theorically it should be
implicitly ready though, since reported. But if no data is reported, it
will be reportedly shut only. And no event will be reported then. This
might still make sense since it's not active, thus we don't want events.

View File

@ -1,50 +0,0 @@
2026-03-12 - thread execution context
Thread execution context (thread_exec_ctx) is a combination of type and pointer
that are set in the current running thread at th_ctx->exec_ctx when entering
certain processing (tasks, sample fetch functions, actions, CLI keywords etc).
They're refined along execution, so that a task such as process_stream could
temporarily switch to a converter while evaluating an expression and switch
back to process_stream. They are reported in thread dumps and are mixed with
caller locations for memory profiling. As such they are intentionally not too
precise in order to avoid an explosion of the number of buckets. At the moment,
the level of granularity it provides is sufficient to try to narrow a
misbehaving origin down to a list of keywords. The context types can currently
be:
- something registered via an initcall, with the initcall's location
- something registered via an ha_caller, with the caller's location
- an explicit sample fetch / converter / action / CLI keyword list
- an explicit function (mainly used for actions without keywords)
- a task / tasklet (no distinction is made), using the ->process pointer
- a filter (e.g. compression), via flt_conf, reporting name
- a mux (via the mux_ops, reporting the name)
- an applet (e.g. cache, stats, CLI)
A macro EXEC_CTX_MAKE(type, pointer) makes a thread_exec_ctx from such
values.
A macro EXEC_CTX_NO_RET(ctx, statement) calls a void statement under the
specified context.
A macro EXEC_CTX_WITH_RET(ctx, expr) calls an expression under the specified
context.
Most locations were modified to directly use these macros on the fly, by
retrieving the context from where it was set on the element being evaluated
(e.g. an action rule contains the context inherited by the action keyword
that was used to create it).
In tools.c, chunk_append_thread_ctx() tries to decode the given exec_ctx and
appends it into the provided buffer. It's used by ha_thread_dump_one() and
cli_io_handler_show_activity() for memory profiling. In this latter case,
the detected thread_ctx are reported in the output under brackets prefixed
with "[via ...]" to distinguish call paths to the same allocators.
A good way to test if a context is properly reported is to place a bleeding
malloc() call into one of the monitored functions, e.g.:
DISGUISE(malloc(8));
and issue "show profiling memory" after stressing the function. Its context
must appear on the right with the number of calls.

View File

@ -1646,20 +1646,16 @@ a payload, it needs to end with an empty line.
The payload pattern can be customized in order to change the way the payload
ends. In order to end a payload with something else than an empty line, a
customized pattern can be set between '<<' and '\n'. Up to 64 characters can be
used in addition to '<<', otherwise this won't be considered a payload. It
should be enough to use random payload patterns. For example, to use a PEM file
that contains empty lines and comments:
customized pattern can be set between '<<' and '\n'. Only 7 characters can be
used in addition to '<<', otherwise this won't be considered a payload.
For example, to use a PEM file that contains empty lines and comments:
# echo -e "set ssl cert common.pem <<%EOF%\n$(cat common.pem)\n%EOF%\n" | \
socat /var/run/haproxy.stat -
Limitations do exist: The pattern "<<" must not be glued to the last word of the
line. The length of a command line must not be greater than tune.bufsize,
including the pattern starting the payload, but excluding the payload
itself. The payload size is limited to 128KB by default. This can be changed by
setting "tune.cli.max-payload-size" global parameter, with some cautions. Note
the pattern marking the end of the payload is part of this limit.
Limitations do exist: the length of the whole buffer passed to the CLI must
not be greater than tune.bufsize and the pattern "<<" must not be glued to the
last word of the line.
When entering a payload while in interactive mode, the prompt will change from
"> " to "+ ".
@ -1729,27 +1725,6 @@ add acl [@<ver>] <acl> <pattern>
This command cannot be used if the reference <acl> is a name also used with
a map. In this case, the "add map" command must be used instead.
add backend <name> from <defproxy> [mode <mode>] [guid <guid>] [ EXPERIMENTAL ]
Instantiate a new backend proxy with the name <name>.
Only TCP or HTTP proxies can be created. All of the settings are inherited
from <defproxy> default proxy instance. By default, it is mandatory to
specify the backend mode via the argument of the same name, unless <defproxy>
already defines it explicitly. It is also possible to use an optional GUID
argument if wanted.
Servers can be added via the command "add server". The backend is initialized
in the unpublished state. Once considered ready for traffic, use "publish
backend" to expose the newly created instance.
All named default proxies can be used, given that they validate the same
inheritance rules applied during configuration parsing. There is some
exceptions though, for example when the mode is neither TCP nor HTTP.
This command is restricted and can only be issued on sockets configured for
level "admin". Moreover, this feature is still considered in development so it
also requires experimental mode (see "experimental-mode on").
add map [@<ver>] <map> <key> <value>
add map [@<ver>] <map> <payload>
Add an entry into the map <map> to associate the value <value> to the key
@ -2125,30 +2100,6 @@ del acl <acl> [<key>|#<ref>]
listing the content of the acl. Note that if the reference <acl> is a name and
is shared with a map, the entry will be also deleted in the map.
del backend <name>
Removes the backend proxy with the name <name>.
This operation is only possible for TCP or HTTP proxies. To succeed, the
backend instance must have been first unpublished. Also, all of its servers
must first be removed (via "del server" CLI). Finally, no stream must still
be attached to the backend instance.
There are additional restrictions that prevent backend removal. First, a
backend cannot be removed if it is explicitly referenced by config elements,
for example via a use_backend rule or in sample expressions. Some proxies
options are also incompatible with runtime deletion. Currently, this is the
case when deprecated dispatch or option transparent are used. Also, a backend
cannot be removed if there is a stick-table declared in it. Finally, it is
impossible for now to remove a backend if QUIC servers were present in it.
It can be useful to use "wait be-removable" prior to this command to check
for the aforementioned requisites. This also provides a method to wait for
the final closure of the streams attached to the target backend.
This command is restricted and can only be issued on sockets configured for
level "admin". Moreover, this feature is still considered in development so it
also requires experimental mode (see "experimental-mode on").
del map <map> [<key>|#<ref>]
Delete all the map entries from the map <map> corresponding to the key <key>.
<map> is the #<id> or the <name> returned by "show map". If the <ref> is used,
@ -2583,8 +2534,7 @@ set maxconn global <maxconn>
delayed until the threshold is reached. A value of zero restores the initial
setting.
set profiling memory { on | off }
set profiling tasks { auto | on | off | lock | no-lock | memory | no-memory }
set profiling { tasks | memory } { auto | on | off }
Enables or disables CPU or memory profiling for the indicated subsystem. This
is equivalent to setting or clearing the "profiling" settings in the "global"
section of the configuration file. Please also see "show profiling". Note
@ -2594,13 +2544,6 @@ set profiling tasks { auto | on | off | lock | no-lock | memory | no-memory }
on the linux-glibc target), and requires USE_MEMORY_PROFILING to be set at
compile time.
. For tasks profiling, it is possible to enable or disable the collection of
per-task lock and memory timings at runtime, but the change is only taken
into account next time the profiler switches from off/auto to on (either
automatically or manually). Thus when using "no-lock" to disable per-task
lock profiling and save CPU cycles, it is recommended to flip the task
profiling off then on to commit the change.
set rate-limit connections global <value>
Change the process-wide connection rate limit, which is set by the global
'maxconnrate' setting. A value of zero disables the limitation. This limit
@ -3360,7 +3303,7 @@ show pools [byname|bysize|byusage] [detailed] [match <pfx>] [<nb>]
- Pool quic_conn_c (152 bytes) : 1337 allocated (203224 bytes), ...
Total: 15 pools, 109578176 bytes allocated, 109578176 used ...
show profiling [{all | status | tasks | memory}] [byaddr|bytime|byctx|aggr|<max_lines>]*
show profiling [{all | status | tasks | memory}] [byaddr|bytime|aggr|<max_lines>]*
Dumps the current profiling settings, one per line, as well as the command
needed to change them. When tasks profiling is enabled, some per-function
statistics collected by the scheduler will also be emitted, with a summary
@ -3369,15 +3312,14 @@ show profiling [{all | status | tasks | memory}] [byaddr|bytime|byctx|aggr|<max_
allocations/releases and their sizes will be reported. It is possible to
limit the dump to only the profiling status, the tasks, or the memory
profiling by specifying the respective keywords; by default all profiling
information is dumped. It is also possible to limit the number of lines of
information is dumped. It is also possible to limit the number of lines
of output of each category by specifying a numeric limit. It is possible to
request that the output is sorted by address, by total execution time, or by
calling context instead of usage, e.g. to ease comparisons between subsequent
calls or to check what needs to be optimized, and to aggregate task activity
by called function instead of seeing the details. Please note that profiling
is essentially aimed at developers since it gives hints about where CPU
cycles or memory are wasted in the code. There is nothing useful to monitor
there.
request that the output is sorted by address or by total execution time
instead of usage, e.g. to ease comparisons between subsequent calls or to
check what needs to be optimized, and to aggregate task activity by called
function instead of seeing the details. Please note that profiling is
essentially aimed at developers since it gives hints about where CPU cycles
or memory are wasted in the code. There is nothing useful to monitor there.
show resolvers [<resolvers section id>]
Dump statistics for the given resolvers section, or all resolvers sections
@ -4552,13 +4494,6 @@ wait { -h | <delay> } [<condition> [<args>...]]
specified condition to be satisfied, to unrecoverably fail, or to remain
unsatisfied for the whole <delay> duration. The supported conditions are:
- be-removable <proxy> : this will wait for the specified proxy backend to be
removable by the "del backend" command. Some conditions will never be
accepted (e.g. backend not yet unpublished or with servers in it) and will
cause the report of a specific error message indicating what condition is
not met. If everything is OK before the delay, a success is returned and
the operation is terminated.
- srv-removable <proxy>/<server> : this will wait for the specified server to
be removable by the "del server" command, i.e. be in maintenance and no
longer have any connection on it (neither active or idle). Some conditions

View File

@ -627,10 +627,7 @@ For the type PP2_TYPE_SSL, the value is itself a defined like this :
uint8_t client;
uint32_t verify;
struct pp2_tlv sub_tlv[0];
} __attribute__((packed));
Note the "packed" attribute which indicates that each field starts immediately
after the previous one (i.e. without type-specific alignment nor padding).
};
The <verify> field will be zero if the client presented a certificate
and it was successfully verified, and non-zero otherwise.

View File

@ -24,7 +24,7 @@ vtest installation
------------------------
To use vtest you will have to download and compile the recent vtest
sources found at https://github.com/vtest/VTest2.
sources found at https://github.com/vtest/VTest.
To compile vtest:

View File

@ -1,69 +0,0 @@
# Example: log HTTP traffic and TLS session keys to separate destinations
#
# "option httpslog" sends HTTP access logs to the /dev/log syslog server.
# TLS session keys are written to 2 ring buffers.
#
# Requirements:
# - HAProxy built with OpenSSL support
# - "tune.ssl.keylog on" in the global section
#
# Retrieve TLS session keys from the ring buffer via the CLI:
# For frontend connections:
#
# (echo "show events keylog-fc -w"; read) | socat /tmp/worker.socket -
#
# For backend connections:
#
# (echo "show events keylog-bc -w"; read) | socat /tmp/worker.socket -
#
# The result is in SSLKEYLOGFILE format and can be saved to a file and loaded
# into Wireshark to decrypt captured TLS traffic.
global
stats socket /tmp/worker.socket mode 0660
tune.ssl.keylog on
# Ring buffer for TLS session keys.
# "format raw" stores only the log message text, without any syslog envelope,
# producing output in the SSLKEYLOGFILE format directly.
ring keylog-fc
description "TLS session key frontend log"
format raw
maxlen 2048
size 1M
ring keylog-bc
description "TLS session key backend log"
format raw
maxlen 2048
size 1M
defaults
mode http
timeout client 30s
timeout server 30s
timeout connect 5s
log-profile keylog-fc
on any format "${HAPROXY_KEYLOG_FC_LOG_FMT}"
log-profile keylog-bc
on any format "${HAPROXY_KEYLOG_BC_LOG_FMT}"
frontend https-in
bind :443 ssl crt "common.pem"
option httpslog
# HTTPs access logs sent to the syslog server
log /dev/log format raw local0
# TLS session keys written to the ring buffer
log ring@keylog-fc len 2048 profile keylog-fc local1
log ring@keylog-bc len 2048 profile keylog-bc local1
default_backend be1
backend be1
server s1 10.0.0.123:443 ssl verify none

View File

@ -2,30 +2,17 @@
#ifndef _ACME_T_H_
#define _ACME_T_H_
#include <haproxy/acme_resolvers-t.h>
#include <haproxy/istbuf.h>
#include <haproxy/openssl-compat.h>
#if defined(HAVE_ACME)
#define ACME_RETRY 5
/* Readiness requirements for challenge */
#define ACME_RDY_NONE 0x00
#define ACME_RDY_CLI 0x01
#define ACME_RDY_DNS 0x02
#define ACME_RDY_DELAY 0x04
#define ACME_RDY_INITIAL_DNS 0x08
/* acme section configuration */
struct acme_cfg {
char *filename; /* config filename */
int linenum; /* config linenum */
char *name; /* section name */
int reuse_key; /* do we need to renew the private key */
int cond_ready; /* ready condition */
unsigned int dns_delay; /* delay in seconds before re-triggering DNS resolution (default: 300) */
unsigned int dns_timeout; /* time after which the DNS check shouldn't be retried (default: 600) */
char *directory; /* directory URL */
char *map; /* storage for tokens + thumbprint */
struct {
@ -41,7 +28,6 @@ struct acme_cfg {
int curves; /* NID of curves */
} key;
char *challenge; /* HTTP-01, DNS-01, etc */
char *profile; /* ACME profile */
char *vars; /* variables put in the dpapi sink */
char *provider; /* DNS provider put in the dpapi sink */
struct acme_cfg *next;
@ -54,13 +40,6 @@ enum acme_st {
ACME_NEWACCOUNT,
ACME_NEWORDER,
ACME_AUTH,
ACME_INITIAL_RSLV_TRIGGER, /* opportunistic DNS check to avoid cond_ready steps */
ACME_INITIAL_RSLV_READY,
ACME_CLI_WAIT, /* wait for the ACME_RDY_CLI */
ACME_INITIAL_DELAY,
ACME_RSLV_RETRY_DELAY,
ACME_RSLV_TRIGGER,
ACME_RSLV_READY,
ACME_CHALLENGE,
ACME_CHKCHALLENGE,
ACME_FINALIZE,
@ -79,8 +58,6 @@ struct acme_auth {
struct ist auth; /* auth URI */
struct ist chall; /* challenge URI */
struct ist token; /* token */
int validated; /* already validated */
struct acme_rslv *rslv; /* acme dns-01 resolver */
int ready; /* is the challenge ready ? */
void *next;
};
@ -107,8 +84,6 @@ struct acme_ctx {
X509_REQ *req;
struct ist finalize;
struct ist certificate;
unsigned int dnstasks; /* number of DNS tasks running for this ctx */
unsigned int dnsstarttime; /* time at which we started the DNS checks */
struct task *task;
struct ebmb_node node;
char name[VAR_ARRAY];
@ -126,6 +101,4 @@ struct acme_ctx {
#define ACME_VERB_ADVANCED 4
#define ACME_VERB_COMPLETE 5
#endif /* ! HAVE_ACME */
#endif

View File

@ -1,27 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _HAPROXY_ACME_RESOLVERS_T_H
#define _HAPROXY_ACME_RESOLVERS_T_H
#include <haproxy/obj_type-t.h>
#include <haproxy/resolvers-t.h>
struct dns_counters;
/* State of one DNS resolution used to check the TXT records of a dns-01
 * ACME challenge. One instance is tied to the acme_auth being validated.
 */
struct acme_rslv {
enum obj_type obj_type; /* OBJ_TYPE_ACME_RSLV */
unsigned int *dnstasks; /* number of running DNS resolution for the same acme_task */
char *hostname_dn; /* hostname to resolve (presumably in DNS label format — confirm against resolvers) */
int hostname_dn_len; /* length of <hostname_dn> */
struct resolvers *resolvers; /* resolvers section performing the resolution */
struct resolv_requester *requester; /* our registration within <resolvers> */
int result; /* RSLV_STATUS_* — NONE until done */
int error_code; /* RSLV_RESP_* from the error callback */
struct task *acme_task; /* ACME task to wake on completion, or NULL */
struct ist txt; /* first TXT record found */
int (*success_cb)(struct resolv_requester *, struct dns_counters *); /* called on successful resolution */
int (*error_cb)(struct resolv_requester *, int); /* called on resolution error, gets an RSLV_RESP_* code */
};
#endif /* _HAPROXY_ACME_RESOLVERS_T_H */

View File

@ -1,18 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _HAPROXY_ACME_RESOLVERS_H
#define _HAPROXY_ACME_RESOLVERS_H
#include <haproxy/openssl-compat.h>
#if defined(HAVE_ACME)
#include <haproxy/acme_resolvers-t.h>
#include <haproxy/acme-t.h>
#include <haproxy/resolvers-t.h>
/* Starts a DNS resolution checking the challenge of <auth>. <dnstasks> points
 * to the caller's counter of running resolutions. Returns the new resolver
 * entry, or NULL on failure (an error message is presumably left in <errmsg>
 * in that case — confirm with the implementation).
 */
struct acme_rslv *acme_rslv_start(struct acme_auth *auth, unsigned int *dnstasks, const char *challenge_type, char **errmsg);
/* Releases the resources attached to <rslv> and the struct itself. */
void acme_rslv_free(struct acme_rslv *rslv);
#endif
#endif /* _HAPROXY_ACME_RESOLVERS_H */

View File

@ -102,10 +102,7 @@ enum act_name {
/* Timeout name valid for a set-timeout rule */
enum act_timeout_name {
ACT_TIMEOUT_CONNECT,
ACT_TIMEOUT_SERVER,
ACT_TIMEOUT_QUEUE,
ACT_TIMEOUT_TARPIT,
ACT_TIMEOUT_TUNNEL,
ACT_TIMEOUT_CLIENT,
};
@ -151,7 +148,6 @@ struct act_rule {
struct ist str; /* string param (reason, header name, ...) */
struct lf_expr fmt; /* log-format compatible expression */
struct my_regex *re; /* used by replace-header/value/uri/path */
struct sample_expr *expr; /* sample expression used by HTTP action */
} http; /* args used by some HTTP rules */
struct http_reply *http_reply; /* HTTP response to be used by return/deny/tarpit rules */
struct redirect_rule *redir; /* redirect rule or "http-request redirect" */
@ -199,11 +195,6 @@ struct act_rule {
struct server *srv; /* target server to attach the connection */
struct sample_expr *name; /* used to differentiate idle connections */
} attach_srv; /* 'attach-srv' rule */
struct {
enum log_orig_id orig;
char *profile_name;
struct log_profile *profile;
} do_log; /* 'do-log' action */
struct {
int value;
struct sample_expr *expr;
@ -212,7 +203,6 @@ struct act_rule {
void *p[4];
} act; /* generic pointers to be used by custom actions */
} arg; /* arguments used by some actions */
struct thread_exec_ctx exec_ctx; /* execution context */
struct {
char *file; /* file name where the rule appears (or NULL) */
int line; /* line number where the rule appears */
@ -224,9 +214,7 @@ struct action_kw {
enum act_parse_ret (*parse)(const char **args, int *cur_arg, struct proxy *px,
struct act_rule *rule, char **err);
int flags;
/* 4 bytes here */
void *private;
struct thread_exec_ctx exec_ctx; /* execution context */
};
struct action_kw_list {

View File

@ -35,7 +35,6 @@ int act_resolution_cb(struct resolv_requester *requester, struct dns_counters *c
int act_resolution_error_cb(struct resolv_requester *requester, int error_code);
const char *action_suggest(const char *word, const struct list *keywords, const char **extra);
void free_act_rule(struct act_rule *rule);
void act_add_list(struct list *head, struct action_kw_list *kw_list);
static inline struct action_kw *action_lookup(struct list *keywords, const char *kw)
{

View File

@ -24,7 +24,6 @@
#include <haproxy/api-t.h>
#include <haproxy/freq_ctr-t.h>
#include <haproxy/tinfo-t.h>
/* bit fields for the "profiling" global variable */
#define HA_PROF_TASKS_OFF 0x00000000 /* per-task CPU profiling forced disabled */
@ -34,8 +33,6 @@
#define HA_PROF_TASKS_MASK 0x00000003 /* per-task CPU profiling mask */
#define HA_PROF_MEMORY 0x00000004 /* memory profiling */
#define HA_PROF_TASKS_MEM 0x00000008 /* per-task CPU profiling with memory */
#define HA_PROF_TASKS_LOCK 0x00000010 /* per-task CPU profiling with locks */
#ifdef USE_MEMORY_PROFILING
@ -85,7 +82,6 @@ struct memprof_stats {
unsigned long long alloc_tot;
unsigned long long free_tot;
void *info; // for pools, ptr to the pool
struct thread_exec_ctx exec_ctx;
};
#endif

View File

@ -107,7 +107,7 @@ struct appctx {
enum obj_type obj_type; /* OBJ_TYPE_APPCTX */
/* 3 unused bytes here */
unsigned int st0; /* Main applet state. May be used by any applet */
unsigned int st1; /* Applet substate. May be used by any applet */
unsigned int st1; /* Applet substate. May be used by any applet */
unsigned int flags; /* APPCTX_FL_* */
struct buffer inbuf;
@ -120,17 +120,16 @@ struct appctx {
struct {
struct buffer *cmdline; /* used to store unfinished commands */
struct buffer payload; /* used to store the payload */
int severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
int level; /* the level of CLI which can be lowered dynamically */
char *payload_pat; /* Pointer to the payload pattern. NULL if no payload */
uint32_t max_payload_sz;/* Max size allowed for dynamic payload. 0 if not allowed */
char payload_pat[8]; /* Payload pattern */
char *payload; /* Pointer on the payload. NULL if no payload */
uint32_t anon_key; /* the key to anonymise with the hash in cli */
/* XXX 4 unused bytes here */
int (*io_handler)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
if the command is terminated or the session released */
struct cli_kw *kw; /* the keyword being processed */
} cli_ctx; /* context dedicated to the CLI applet */
struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
@ -148,6 +147,7 @@ struct appctx {
/* here we have the service's context (CLI command, applet, etc) */
void *svcctx; /* pointer to a context used by the command, e.g. <storage> below */
struct {
void *shadow; /* shadow of svcctx above, do not use! */
char storage[APPLET_MAX_SVCCTX]; /* storage of svcctx above */
} svc; /* generic storage for most commands */
};

View File

@ -62,13 +62,6 @@ ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs,
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
/* macros to switch the calling context to the applet during a call. There's
* one with a return value for most calls, and one without for the few like
* fct(), shut(), or release() with no return.
*/
#define CALL_APPLET_WITH_RET(applet, func) EXEC_CTX_WITH_RET(EXEC_CTX_MAKE(TH_EX_CTX_APPLET, (applet)), (applet)->func)
#define CALL_APPLET_NO_RET(applet, func) EXEC_CTX_NO_RET(EXEC_CTX_MAKE(TH_EX_CTX_APPLET, (applet)), (applet)->func)
static forceinline uint appctx_app_test(const struct appctx *appctx, uint test)
{
@ -133,7 +126,7 @@ static inline int appctx_init(struct appctx *appctx)
task_set_thread(appctx->t, tid);
if (appctx->applet->init)
return CALL_APPLET_WITH_RET(appctx->applet, init(appctx));
return appctx->applet->init(appctx);
return 0;
}

View File

@ -92,7 +92,6 @@ enum {
ARGC_TCK, /* tcp-check expression */
ARGC_CFG, /* configuration expression */
ARGC_CLI, /* CLI expression*/
ARGC_OTEL, /* opentelemetry scope args */
};
/* flags used when compiling and executing regex */

View File

@ -150,24 +150,6 @@ struct lbprm_per_tgrp {
struct lb_fwrr_per_tgrp fwrr;
};
};
/* Callbacks for some LB actions. Any of them may be NULL (thus should be ignored).
* Those marked "srvlock" will need to be called with the server lock held.
* The other ones might take it themselves if needed.
*/
struct lb_ops {
int (*proxy_init)(struct proxy *); /* set up per-proxy LB state at config time; <0=fail */
void (*update_server_eweight)(struct server *); /* to be called after eweight change // srvlock */
void (*set_server_status_up)(struct server *); /* to be called after status changes to UP // srvlock */
void (*set_server_status_down)(struct server *); /* to be called after status changes to DOWN // srvlock */
void (*server_take_conn)(struct server *); /* to be called when a connection is assigned to the server */
void (*server_drop_conn)(struct server *); /* to be called when a connection is dropped from the server */
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
int (*server_init)(struct server *); /* initialize a freshly added server (runtime); <0=fail. */
};
/* LB parameters for all algorithms */
struct lbprm {
union { /* LB parameters depending on the algo type */
@ -197,7 +179,19 @@ struct lbprm {
struct mt_list lb_free_list; /* LB tree elements available */
__decl_thread(HA_RWLOCK_T lock);
struct server *fbck; /* first backup server when !PR_O_USE_ALL_BK, or NULL */
const struct lb_ops *ops; /* algo-specific operations; NULL = no LB algo selected */
/* Call backs for some actions. Any of them may be NULL (thus should be ignored).
* Those marked "srvlock" will need to be called with the server lock held.
* The other ones might take it themselves if needed.
*/
void (*update_server_eweight)(struct server *); /* to be called after eweight change // srvlock */
void (*set_server_status_up)(struct server *); /* to be called after status changes to UP // srvlock */
void (*set_server_status_down)(struct server *); /* to be called after status changes to DOWN // srvlock */
void (*server_take_conn)(struct server *); /* to be called when connection is assigned */
void (*server_drop_conn)(struct server *); /* to be called when connection is dropped */
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
};
#endif /* _HAPROXY_BACKEND_T_H */

View File

@ -69,7 +69,6 @@ int backend_parse_balance(const char **args, char **err, struct proxy *curproxy)
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit);
int be_downtime(struct proxy *px);
int be_supports_dynamic_srv(struct proxy *px, char **msg);
void recount_servers(struct proxy *px);
void update_backend_weight(struct proxy *px);
@ -99,11 +98,8 @@ static inline int be_is_eligible(const struct proxy *be)
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
uint now_sec = ns_to_sec(now_ns);
if (be->be_counters.shared.tg)
if (HA_ATOMIC_LOAD(&be->be_counters.shared.tg[tgid - 1]->last_sess) != now_sec)
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, now_sec);
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* This function returns non-zero if the designated server will be

View File

@ -24,7 +24,6 @@
#include <haproxy/api-t.h>
#include <haproxy/buf-t.h>
#include <haproxy/filters-t.h>
#include <haproxy/show_flags-t.h>
/* The CF_* macros designate Channel Flags, which may be ORed in the bit field
@ -206,7 +205,6 @@ struct channel {
unsigned char xfer_large; /* number of consecutive large xfers */
unsigned char xfer_small; /* number of consecutive small xfers */
int analyse_exp; /* expiration date for current analysers (if set) */
struct chn_flt flt; /* current state of filters active on this channel */
};

View File

@ -376,7 +376,6 @@ static inline void channel_add_input(struct channel *chn, unsigned int len)
c_adv(chn, fwd);
}
/* notify that some data was read */
chn_prod(chn)->bytes_in += len;
chn->flags |= CF_READ_EVENT;
}
@ -788,12 +787,8 @@ static inline int channel_recv_max(const struct channel *chn)
*/
static inline size_t channel_data_limit(const struct channel *chn)
{
size_t max = (global.tune.bufsize - global.tune.maxrewrite);
size_t max;
if (!c_size(chn))
return 0;
max = (c_size(chn) - global.tune.maxrewrite);
if (IS_HTX_STRM(chn_strm(chn)))
max -= HTX_BUF_OVERHEAD;
return max;

View File

@ -24,7 +24,6 @@
#include <haproxy/connection-t.h>
#include <haproxy/dynbuf-t.h>
#include <haproxy/obj_type-t.h>
#include <haproxy/tools-t.h>
#include <haproxy/vars-t.h>
/* Please note: this file tends to commonly be part of circular dependencies,
@ -60,7 +59,6 @@ enum chk_result {
#define CHK_ST_FASTINTER 0x0400 /* force fastinter check */
#define CHK_ST_READY 0x0800 /* check ready to migrate or run, see below */
#define CHK_ST_SLEEPING 0x1000 /* check was sleeping, i.e. not currently bound to a thread, see below */
#define CHK_ST_USE_SMALL_BUFF 0x2000 /* Use small buffers if possible for the request */
/* 4 possible states for CHK_ST_SLEEPING and CHK_ST_READY:
* SLP RDY State Description
@ -156,7 +154,7 @@ enum {
};
struct tcpcheck_rule;
struct tcpcheck;
struct tcpcheck_rules;
struct check {
enum obj_type obj_type; /* object type == OBJ_TYPE_CHECK */
@ -175,7 +173,7 @@ struct check {
signed char use_ssl; /* use SSL for health checks (1: on, 0: server mode, -1: off) */
int send_proxy; /* send a PROXY protocol header with checks */
int reuse_pool; /* try to reuse idle connections */
struct tcpcheck *tcpcheck; /* tcp-check to use to perform a health-check */
struct tcpcheck_rules *tcpcheck_rules; /* tcp-check send / expect rules */
struct tcpcheck_rule *current_step; /* current step when using tcpcheck */
int inter, fastinter, downinter; /* checks: time in milliseconds */
enum chk_result result; /* health-check result : CHK_RES_* */
@ -190,8 +188,6 @@ struct check {
char **envp; /* the environment to use if running a process-based check */
struct pid_list *curpid; /* entry in pid_list used for current process-based test, or -1 if not in test */
struct sockaddr_storage addr; /* the address to check */
struct net_addr_type addr_type; /* Address type (dgram/stream for both protocol and XPRT) */
int alt_proto; /* Needed to know exactly which protocol we are after */
char *pool_conn_name; /* conn name used on reuse */
char *sni; /* Server name */
char *alpn_str; /* ALPN to use for checks */
@ -199,7 +195,6 @@ struct check {
const struct mux_proto_list *mux_proto; /* the mux to use for all outgoing connections (specified by the "proto" keyword) */
struct list check_queue; /* entry in the check queue. Not empty = in queue. */
int via_socks4; /* check the connection via socks4 proxy */
struct ist unique_id; /* custom unique ID, same as in struct stream */
};
#endif /* _HAPROXY_CHECKS_T_H */

View File

@ -26,7 +26,6 @@
#include <haproxy/proxy-t.h>
#include <haproxy/server-t.h>
#include <haproxy/trace-t.h>
#include <haproxy/log.h>
extern struct trace_source trace_check;
@ -79,21 +78,12 @@ struct task *process_chk(struct task *t, void *context, unsigned int state);
struct task *srv_chk_io_cb(struct task *t, void *ctx, unsigned int state);
int check_buf_available(void *target);
struct buffer *check_get_buf(struct check *check, struct buffer *bptr, unsigned int small_buffer);
struct buffer *check_get_buf(struct check *check, struct buffer *bptr);
void check_release_buf(struct check *check, struct buffer *bptr);
static inline struct ist check_generate_unique_id(struct check *check, struct lf_expr *format)
{
if (!isttest(check->unique_id)) {
generate_unique_id(&check->unique_id, check->sess, NULL, format);
}
return check->unique_id;
}
const char *init_check(struct check *check, int type);
void free_check(struct check *check);
void check_purge(struct check *check);
int wake_srv_chk(struct stconn *sc);
int init_srv_check(struct server *srv);
int init_srv_agent_check(struct server *srv);

View File

@ -32,8 +32,6 @@
extern struct pool_head *pool_head_trash;
extern struct pool_head *pool_head_large_trash;
extern struct pool_head *pool_head_small_trash;
/* function prototypes */
@ -48,10 +46,6 @@ int chunk_asciiencode(struct buffer *dst, struct buffer *src, char qc);
int chunk_strcmp(const struct buffer *chk, const char *str);
int chunk_strcasecmp(const struct buffer *chk, const char *str);
struct buffer *get_trash_chunk(void);
struct buffer *get_large_trash_chunk(void);
struct buffer *get_small_trash_chunk(void);
struct buffer *get_trash_chunk_sz(size_t size);
struct buffer *get_larger_trash_chunk(struct buffer *chunk);
int init_trash_buffers(int first);
static inline void chunk_reset(struct buffer *chk)
@ -112,80 +106,12 @@ static forceinline struct buffer *alloc_trash_chunk(void)
return chunk;
}
/*
 * Allocate a large trash chunk from the reentrant pool and initialize its
 * buffer, which starts right after the struct buffer header. Returns NULL if
 * the large pool is not configured or the allocation fails; the caller is
 * responsible for checking the result. Must be released with
 * free_trash_chunk().
 */
static forceinline struct buffer *alloc_large_trash_chunk(void)
{
	struct buffer *ret = NULL;

	if (pool_head_large_trash) {
		ret = pool_alloc(pool_head_large_trash);
		if (ret) {
			/* usable area lives just past the buffer header */
			char *storage = (char *)ret + sizeof(struct buffer);

			*storage = 0;
			chunk_init(ret, storage,
			           pool_head_large_trash->size - sizeof(struct buffer));
		}
	}
	return ret;
}
/*
 * Allocate a small trash chunk from the reentrant pool and initialize its
 * buffer, which starts right after the struct buffer header. Returns NULL if
 * the small pool is not configured or the allocation fails; the caller is
 * responsible for checking the result. Must be released with
 * free_trash_chunk().
 */
static forceinline struct buffer *alloc_small_trash_chunk(void)
{
	struct buffer *ret = NULL;

	if (pool_head_small_trash) {
		ret = pool_alloc(pool_head_small_trash);
		if (ret) {
			/* usable area lives just past the buffer header */
			char *storage = (char *)ret + sizeof(struct buffer);

			*storage = 0;
			chunk_init(ret, storage,
			           pool_head_small_trash->size - sizeof(struct buffer));
		}
	}
	return ret;
}
/*
 * Allocate a trash chunk from the smallest pool able to hold <size> bytes,
 * trying small, then default, then large pools in that order. Returns NULL
 * when no configured pool is big enough or the underlying allocation fails;
 * the caller must check the result. Must be released with free_trash_chunk().
 */
static forceinline struct buffer *alloc_trash_chunk_sz(size_t size)
{
	struct buffer *ret;

	if (pool_head_small_trash && size <= pool_head_small_trash->size)
		ret = alloc_small_trash_chunk();
	else if (size <= pool_head_trash->size)
		ret = alloc_trash_chunk();
	else if (pool_head_large_trash && size <= pool_head_large_trash->size)
		ret = alloc_large_trash_chunk();
	else
		ret = NULL;

	return ret;
}
/*
 * free a trash chunk allocated by alloc_trash_chunk(). NOP on NULL.
 * The owning pool is recovered from the chunk's size: a size matching the
 * small or large pool routes the chunk back there, anything else goes to
 * the default pool.
 */
static forceinline void free_trash_chunk(struct buffer *chunk)
{
	if (pool_head_small_trash && chunk && chunk->size == pool_head_small_trash->size - sizeof(struct buffer))
		pool_free(pool_head_small_trash, chunk);
	else if (pool_head_large_trash && chunk && chunk->size == pool_head_large_trash->size - sizeof(struct buffer))
		pool_free(pool_head_large_trash, chunk);
	else
		pool_free(pool_head_trash, chunk);
	/* NOTE(review): this trailing unconditional pool_free() duplicates the
	 * else branch above and would double-free the chunk as written. It looks
	 * like a diff/merge artifact (old and new versions of the function
	 * rendered together) — confirm against the real tree before relying on
	 * this text.
	 */
	pool_free(pool_head_trash, chunk);
}
/* copies chunk <src> into <chk>. Returns 0 in case of failure. */

View File

@ -23,7 +23,6 @@
#define _HAPROXY_CLI_T_H
#include <haproxy/applet-t.h>
#include <haproxy/tinfo-t.h>
/* Access level for a stats socket (appctx->cli_ctx.level) */
#define ACCESS_LVL_NONE 0x0000
@ -49,7 +48,6 @@
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
#define APPCTX_CLI_ST1_YIELD (1 << 6) /* forced yield between commands */
#define APPCTX_CLI_ST1_DYN_PAYLOAD (1 << 7) /* the payload was dynamically allocated */
#define CLI_PREFIX_KW_NB 5
#define CLI_MAX_MATCHES 5
@ -102,7 +100,6 @@ enum cli_wait_err {
enum cli_wait_cond {
CLI_WAIT_COND_NONE, // no condition to wait on
CLI_WAIT_COND_SRV_UNUSED,// wait for server to become unused
CLI_WAIT_COND_BE_UNUSED, // wait for backend to become unused
};
struct cli_wait_ctx {
@ -113,13 +110,6 @@ struct cli_wait_ctx {
const char *msg; // static error message for failures if not NULL
};
struct pcli_txn {
int next_pid; /* next target PID to use for the CLI proxy */
int flags; /* flags for CLI proxy */
char payload_pat[65]; /* payload pattern for the CLI proxy, including trailing \0 */
};
struct cli_kw {
const char *str_kw[CLI_PREFIX_KW_NB]; /* keywords ended by NULL, limited to CLI_PREFIX_KW_NB
separated keywords combination */
@ -129,8 +119,6 @@ struct cli_kw {
void (*io_release)(struct appctx *appctx);
void *private;
int level; /* this is the level needed to show the keyword usage and to use it */
/* 4-byte hole here */
struct thread_exec_ctx exec_ctx; /* execution context */
};
struct cli_kw_list {

View File

@ -47,13 +47,10 @@ int mworker_cli_global_proxy_new_listener(struct mworker_proc *proc);
void mworker_cli_proxy_stop(void);
extern struct bind_conf *mcli_reload_bind_conf;
extern struct pool_head *pool_head_pcli_txn;
/* proxy mode cli functions */
/* analyzers */
struct pcli_txn *pcli_create_txn(struct stream *s);
void pcli_destroy_txn(struct stream *s);
int pcli_wait_for_request(struct stream *s, struct channel *req, int an_bit);
int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit);

View File

@ -130,8 +130,7 @@ enum {
CO_FL_OPT_TOS = 0x00000020, /* connection has a special sockopt tos */
CO_FL_QSTRM_SEND = 0x00000040, /* connection uses QMux protocol, needs to exchange transport parameters before starting mux layer */
CO_FL_QSTRM_RECV = 0x00000080, /* connection uses QMux protocol, needs to exchange transport parameters before starting mux layer */
/* unused : 0x00000040, 0x00000080 */
/* These flags indicate whether the Control and Transport layers are initialized */
CO_FL_CTRL_READY = 0x00000100, /* FD was registered, fd_delete() needed */
@ -213,14 +212,13 @@ static forceinline char *conn_show_flags(char *buf, size_t len, const char *deli
/* flags */
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
_(CO_FL_QSTRM_SEND, _(CO_FL_QSTRM_RECV,
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_SSL_NO_CACHED_INFO, _(CO_FL_EARLY_SSL_HS,
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
_(CO_FL_XPRT_TRACKED
)))))))))))))))))))))))))))))));
)))))))))))))))))))))))))))));
/* epilogue */
_(~0U);
return buf;
@ -285,8 +283,6 @@ enum {
CO_ER_SSL_FATAL, /* SSL fatal error during a SSL_read or SSL_write */
CO_ER_QSTRM, /* QMux transport parameter exchange failure */
CO_ER_REVERSE, /* Error during reverse connect */
CO_ER_POLLERR, /* we only noticed POLLERR */
@ -349,7 +345,6 @@ enum {
XPRT_SSL = 1,
XPRT_HANDSHAKE = 2,
XPRT_QUIC = 3,
XPRT_QSTRM = 4,
XPRT_ENTRIES /* must be last one */
};
@ -361,7 +356,6 @@ enum {
MX_FL_NO_UPG = 0x00000004, /* set if mux does not support any upgrade */
MX_FL_FRAMED = 0x00000008, /* mux working on top of a framed transport layer (QUIC) */
MX_FL_REVERSABLE = 0x00000010, /* mux supports connection reversal */
MX_FL_EXPERIMENTAL = 0x00000020, /* requires experimental support directives */
};
/* PROTO token registration */

View File

@ -34,7 +34,6 @@
#include <haproxy/listener-t.h>
#include <haproxy/obj_type.h>
#include <haproxy/pool-t.h>
#include <haproxy/protocol.h>
#include <haproxy/server.h>
#include <haproxy/session-t.h>
#include <haproxy/task-t.h>
@ -50,13 +49,6 @@ extern struct mux_stopping_data mux_stopping_data[MAX_THREADS];
#define IS_HTX_CONN(conn) ((conn)->mux && ((conn)->mux->flags & MX_FL_HTX))
/* macros to switch the calling context to the mux during a call. There's one
* with a return value for most calls, and one without for the few like shut(),
* detach() or destroy() with no return.
*/
#define CALL_MUX_WITH_RET(mux, func) EXEC_CTX_WITH_RET(EXEC_CTX_MAKE(TH_EX_CTX_MUX, (mux)), (mux)->func)
#define CALL_MUX_NO_RET(mux, func) EXEC_CTX_NO_RET(EXEC_CTX_MAKE(TH_EX_CTX_MUX, (mux)), (mux)->func)
/* receive a PROXY protocol header over a connection */
int conn_recv_proxy(struct connection *conn, int flag);
int conn_send_proxy(struct connection *conn, unsigned int flag);
@ -488,7 +480,7 @@ static inline int conn_install_mux(struct connection *conn, const struct mux_ops
conn->mux = mux;
conn->ctx = ctx;
ret = mux->init ? CALL_MUX_WITH_RET(mux, init(conn, prx, sess, &BUF_NULL)) : 0;
ret = mux->init ? mux->init(conn, prx, sess, &BUF_NULL) : 0;
if (ret < 0) {
conn->mux = NULL;
conn->ctx = NULL;
@ -610,17 +602,16 @@ void list_mux_proto(FILE *out);
*/
static inline const struct mux_proto_list *conn_get_best_mux_entry(
const struct ist mux_proto,
int proto_side, int proto_is_quic, int proto_mode)
int proto_side, int proto_mode)
{
struct mux_proto_list *item;
struct mux_proto_list *fallback = NULL;
list_for_each_entry(item, &mux_proto_list.list, list) {
if (!(item->side & proto_side) || !(item->mode & proto_mode) || ((proto_is_quic != 0) != ((item->mux->flags & MX_FL_FRAMED) != 0)))
if (!(item->side & proto_side) || !(item->mode & proto_mode))
continue;
if (istlen(mux_proto) && isteq(mux_proto, item->token)) {
if (istlen(mux_proto) && isteq(mux_proto, item->token))
return item;
}
else if (!istlen(item->token)) {
if (!fallback || (item->mode == proto_mode && fallback->mode != proto_mode))
fallback = item;
@ -642,7 +633,7 @@ static inline const struct mux_ops *conn_get_best_mux(struct connection *conn,
{
const struct mux_proto_list *item;
item = conn_get_best_mux_entry(mux_proto, proto_side, proto_is_quic(conn->ctrl), proto_mode);
item = conn_get_best_mux_entry(mux_proto, proto_side, proto_mode);
return item ? item->mux : NULL;
}
@ -692,12 +683,6 @@ static inline int conn_is_ssl(struct connection *conn)
return !!conn_get_ssl_sock_ctx(conn);
}
/* Returns true if connection runs over QUIC. The result is the raw masked
 * flag value (non-zero when set), not a normalized 0/1 boolean.
 * NOTE(review): this tests CO_FL_FDLESS, apparently on the assumption that
 * QUIC connections are the only FD-less ones — confirm that no other
 * transport sets this flag.
 */
static inline int conn_is_quic(const struct connection *conn)
{
	return conn->flags & CO_FL_FDLESS;
}
/* Returns true if connection must be reversed. */
static inline int conn_is_reverse(const struct connection *conn)
{

View File

@ -185,29 +185,6 @@ struct be_counters {
} p; /* protocol-specific stats */
};
/* extra counters that are registered at boot by various modules */
enum counters_type {
COUNTERS_FE = 0,
COUNTERS_BE,
COUNTERS_SV,
COUNTERS_LI,
COUNTERS_RSLV,
COUNTERS_OFF_END /* must always be last */
};
struct extra_counters {
char **datap; /* points to pointer to heap containing counters allocated in a linear fashion */
size_t size; /* size of allocated data */
size_t tgrp_step; /* distance in words between two datap for consecutive tgroups, 0 for single */
uint nbtgrp; /* number of thread groups accessing these counters */
enum counters_type type; /* type of object containing the counters */
};
#define EXTRA_COUNTERS(name) \
struct extra_counters *name
#endif /* _HAPROXY_COUNTERS_T_H */
/*

View File

@ -26,9 +26,6 @@
#include <haproxy/counters-t.h>
#include <haproxy/guid-t.h>
#include <haproxy/global.h>
extern THREAD_LOCAL void *trash_counters;
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid, char **errmsg);
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid, char **errmsg);
@ -104,106 +101,4 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
__ret; \
})
#define COUNTERS_UPDATE_MAX(counter, count) \
do { \
if (!(global.tune.options & GTUNE_NO_MAX_COUNTER)) \
HA_ATOMIC_UPDATE_MAX(counter, count); \
} while (0)
/* Manipulation of extra_counters, for boot-time registrable modules */
/* retrieve the base storage of extra counters (first tgroup if any) */
#define EXTRA_COUNTERS_BASE(counters, mod) \
(likely(counters) ? \
((void *)(*(counters)->datap + (mod)->counters_off[(counters)->type])) : \
(trash_counters))
/* retrieve the pointer to the extra counters storage for module <mod> for the
* current TGID.
*/
#define EXTRA_COUNTERS_GET(counters, mod) \
(likely(counters) ? \
((void *)(counters)->datap[(counters)->tgrp_step * (tgid - 1)] + \
(mod)->counters_off[(counters)->type]) : \
(trash_counters))
#define EXTRA_COUNTERS_REGISTER(counters, ctype, alloc_failed_label, storage, step) \
do { \
typeof(*counters) _ctr; \
_ctr = calloc(1, sizeof(*_ctr)); \
if (!_ctr) \
goto alloc_failed_label; \
_ctr->type = (ctype); \
_ctr->tgrp_step = (step); \
_ctr->datap = (storage); \
*(counters) = _ctr; \
} while (0)
#define EXTRA_COUNTERS_ADD(mod, counters, new_counters, csize) \
do { \
typeof(counters) _ctr = (counters); \
(mod)->counters_off[_ctr->type] = _ctr->size; \
_ctr->size += (csize); \
} while (0)
#define EXTRA_COUNTERS_ALLOC(counters, alloc_failed_label, nbtg) \
do { \
typeof(counters) _ctr = (counters); \
char **datap = _ctr->datap; \
uint tgrp; \
_ctr->nbtgrp = _ctr->tgrp_step ? (nbtg) : 1; \
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
*datap = malloc((_ctr)->size); \
if (!*_ctr->datap) \
goto alloc_failed_label; \
datap += _ctr->tgrp_step; \
} \
} while (0)
#define EXTRA_COUNTERS_INIT(counters, mod, init_counters, init_counters_size) \
do { \
typeof(counters) _ctr = (counters); \
char **datap = _ctr->datap; \
uint tgrp; \
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
memcpy(*datap + mod->counters_off[_ctr->type], \
(init_counters), (init_counters_size)); \
datap += _ctr->tgrp_step; \
} \
} while (0)
#define EXTRA_COUNTERS_FREE(counters) \
do { \
typeof(counters) _ctr = (counters); \
if (_ctr) { \
char **datap = _ctr->datap; \
uint tgrp; \
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
ha_free(datap); \
datap += _ctr->tgrp_step; \
} \
free(_ctr); \
} \
} while (0)
/* aggregate all values of <metricp> over the thread groups handled by
* <counters>. <metricp> MUST correspond to an entry of the first tgrp of
* <counters>. The number of groups and the step are found in <counters>. The
* type of the return value is the same as <metricp>, and must be a scalar so
* that values are summed before being returned.
*/
#define EXTRA_COUNTERS_AGGR(counters, metricp) \
({ \
typeof(counters) _ctr = (counters); \
typeof(metricp) *valp, _ret = 0; \
if (_ctr) { \
size_t ofs = (char *)&metricp - _ctr->datap[0]; \
uint tgrp; \
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
valp = (typeof(valp))(_ctr->datap[tgrp * (counters)->tgrp_step] + ofs); \
_ret += HA_ATOMIC_LOAD(valp); \
} \
} \
_ret; \
})
#endif /* _HAPROXY_COUNTERS_H */

View File

@ -34,7 +34,6 @@
#define MAX_TGROUPS 1
#define MAX_THREADS_PER_GROUP 1
#define DEF_MAX_THREADS_PER_GROUP 1
#else
@ -50,15 +49,6 @@
#define MAX_THREADS_PER_GROUP __WORDSIZE
/* Default value for the maximum number of threads per group. Thread counts
* beyond this value will induce the creation of new thread groups and thus
* limit contention on highly accessed areas. The value may be changed between
* 1 and MAX_THREADS_PER_GROUP via the global "max-threads-per-group" setting.
*/
#ifndef DEF_MAX_THREADS_PER_GROUP
#define DEF_MAX_THREADS_PER_GROUP 16
#endif
/* threads enabled, max_threads defaults to long bits for 1 tgroup or 4 times
* long bits if more tgroups are enabled.
*/
@ -546,11 +536,6 @@
#define TIME_STATS_SAMPLES 512
#endif
/* number of samples used to measure the load in the run queue */
#ifndef RQ_LOAD_SAMPLES
#define RQ_LOAD_SAMPLES 512
#endif
/* max ocsp cert id asn1 encoded length */
#ifndef OCSP_MAX_CERTID_ASN1_LENGTH
#define OCSP_MAX_CERTID_ASN1_LENGTH 128
@ -616,7 +601,7 @@
* store stats.
*/
#ifndef MEMPROF_HASH_BITS
# define MEMPROF_HASH_BITS 12
# define MEMPROF_HASH_BITS 10
#endif
#define MEMPROF_HASH_BUCKETS (1U << MEMPROF_HASH_BITS)

View File

@ -24,12 +24,12 @@
#include <import/ebtree-t.h>
#include <haproxy/buf-t.h>
#include <haproxy/connection-t.h>
#include <haproxy/counters-t.h>
#include <haproxy/buf-t.h>
#include <haproxy/dgram-t.h>
#include <haproxy/dns_ring-t.h>
#include <haproxy/obj_type-t.h>
#include <haproxy/stats-t.h>
#include <haproxy/task-t.h>
#include <haproxy/thread.h>
@ -152,7 +152,6 @@ struct dns_nameserver {
struct dns_stream_server *stream; /* used for tcp dns */
EXTRA_COUNTERS(extra_counters);
char *extra_counters_storage; /* storage used for extra_counters above */
struct dns_counters *counters;
struct list list; /* nameserver chained list */

View File

@ -36,8 +36,6 @@
#include <haproxy/pool.h>
extern struct pool_head *pool_head_buffer;
extern struct pool_head *pool_head_large_buffer;
extern struct pool_head *pool_head_small_buffer;
int init_buffer(void);
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
@ -55,42 +53,6 @@ static inline int buffer_almost_full(const struct buffer *buf)
return b_almost_full(buf);
}
/* Return 1 if <sz> is the default buffer size (i.e. matches the size of
 * entries in pool_head_buffer), otherwise 0.
 */
static inline int b_is_default_sz(size_t sz)
{
	return (sz == pool_head_buffer->size);
}
/* Return 1 if <sz> is the size of a large buffer (always false if large
 * buffers are not configured, i.e. pool_head_large_buffer is NULL).
 */
static inline int b_is_large_sz(size_t sz)
{
	return (pool_head_large_buffer && sz == pool_head_large_buffer->size);
}
/* Return 1 if <sz> is the size of a small buffer (always false if small
 * buffers are not configured, i.e. pool_head_small_buffer is NULL).
 */
static inline int b_is_small_sz(size_t sz)
{
	return (pool_head_small_buffer && sz == pool_head_small_buffer->size);
}
/* Return 1 if <buf> is a default buffer, i.e. its allocated size matches the
 * default buffer pool's entry size.
 */
static inline int b_is_default(struct buffer *buf)
{
	return b_is_default_sz(b_size(buf));
}
/* Return 1 if <buf> is a large buffer (always 0 if large buffers are not
 * configured).
 */
static inline int b_is_large(struct buffer *buf)
{
	return b_is_large_sz(b_size(buf));
}
/* Return 1 if <buf> is a small buffer (always 0 if small buffers are not
 * configured).
 */
static inline int b_is_small(struct buffer *buf)
{
	return b_is_small_sz(b_size(buf));
}
/**************************************************/
/* Functions below are used for buffer allocation */
/**************************************************/
@ -174,20 +136,13 @@ static inline char *__b_get_emergency_buf(void)
#define __b_free(_buf) \
do { \
char *area = (_buf)->area; \
size_t sz = (_buf)->size; \
\
/* let's first clear the area to save an occasional "show sess all" \
* glancing over our shoulder from getting a dangling pointer. \
*/ \
*(_buf) = BUF_NULL; \
__ha_barrier_store(); \
/* if enabled, large buffers are always strictly greater \
* than the default buffers */ \
if (unlikely(b_is_large_sz(sz))) \
pool_free(pool_head_large_buffer, area); \
else if (unlikely(b_is_small_sz(sz))) \
pool_free(pool_head_small_buffer, area); \
else if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
th_ctx->emergency_bufs[th_ctx->emergency_bufs_left++] = area; \
else \
pool_free(pool_head_buffer, area); \
@ -200,35 +155,6 @@ static inline char *__b_get_emergency_buf(void)
__b_free((_buf)); \
} while (0)
/* Ensure <buf> has a small buffer area attached: if <buf> is still unsized,
 * allocate an area from the small-buffer pool and record the configured small
 * buffer size. Returns <buf> on success (including when it was already
 * allocated), or NULL if the pool allocation fails.
 */
static inline struct buffer *b_alloc_small(struct buffer *buf)
{
	char *mem;

	if (buf->size)
		return buf; /* already carries an area, nothing to do */

	mem = pool_alloc(pool_head_small_buffer);
	if (!mem)
		return NULL;

	buf->area = mem;
	buf->size = global.tune.bufsize_small;
	return buf;
}
/* Ensure <buf> has a large buffer area attached: if <buf> is still unsized,
 * allocate an area from the large-buffer pool and record the configured large
 * buffer size. Returns <buf> on success (including when it was already
 * allocated), or NULL if the pool allocation fails.
 */
static inline struct buffer *b_alloc_large(struct buffer *buf)
{
	char *mem;

	if (buf->size)
		return buf; /* already carries an area, nothing to do */

	mem = pool_alloc(pool_head_large_buffer);
	if (!mem)
		return NULL;

	buf->area = mem;
	buf->size = global.tune.bufsize_large;
	return buf;
}
/* Offer one or multiple buffer currently belonging to target <from> to whoever
* needs one. Any pointer is valid for <from>, including NULL. Its purpose is
* to avoid passing a buffer to oneself in case of failed allocations (e.g.

View File

@ -143,7 +143,7 @@ struct flt_kw_list {
* otherwise.
* - http_reset : Called when the HTTP message is reset. It happens
* either when a 100-continue response is received.
* that can be detected if s->txn.http->status is 10X, or
* that can be detected if s->txn->status is 10X, or
* if we're attempting a L7 retry.
* Returns nothing.
* - http_reply : Called when, at any time, HAProxy decides to stop
@ -207,7 +207,6 @@ struct flt_ops {
* accessible from a filter when instantiated in a stream
*/
struct flt_conf {
const char *name; /* The filter name (same name used to select the filter from config) */
const char *id; /* The filter id */
struct flt_ops *ops; /* The filter callbacks */
void *conf; /* The filter configuration */
@ -215,12 +214,6 @@ struct flt_conf {
unsigned int flags; /* FLT_CFG_FL_* */
};
struct filter_sequence_elt {
char *flt_name; /* filter name (set during parsing) */
struct flt_conf *flt_conf; /* associated filter conf (set after parsing) */
struct list list; /* list element */
};
/*
 * Structure representing a filter instance attached to a stream
*
@ -239,28 +232,22 @@ struct filter {
* 0: request channel, 1: response channel */
unsigned int pre_analyzers; /* bit field indicating analyzers to pre-process */
unsigned int post_analyzers; /* bit field indicating analyzers to post-process */
struct list list; /* Filter list for the stream */
/* req_list and res_list are exactly equivalent, except the order may differ */
struct list req_list; /* Filter list for request channel */
struct list res_list; /* Filter list for response channel */
struct list list; /* Next filter for the same proxy/stream */
};
/*
 * Structure representing the "global" state of filters attached to a stream.
* Doesn't hold much information, as the channel themselves hold chn_flt struct
* which contains the per-channel members.
*/
struct strm_flt {
struct list filters; /* List of filters attached to a stream */
struct filter *current[2]; /* From which filter resume processing, for a specific channel.
* This is used for resumable callbacks only,
* If NULL, we start from the first filter.
* 0: request channel, 1: response channel */
unsigned short flags; /* STRM_FL_* */
};
/* structure holding filter state for some members that are channel oriented */
struct chn_flt {
struct list filters; /* List of filters attached to a channel */
struct filter *current; /* From which filter resume processing, for a specific channel. */
unsigned char nb_data_filters; /* Number of data filters registered on channel */
unsigned long long offset;
unsigned char nb_req_data_filters; /* Number of data filters registered on the request channel */
unsigned char nb_rsp_data_filters; /* Number of data filters registered on the response channel */
unsigned long long offset[2];
};
#endif /* _HAPROXY_FILTERS_T_H */

View File

@ -28,9 +28,7 @@
#include <haproxy/stream-t.h>
extern const char *trace_flt_id;
extern const char *http_comp_req_flt_id;
extern const char *http_comp_res_flt_id;
extern const char *http_comp_flt_id;
extern const char *cache_store_flt_id;
extern const char *spoe_filter_id;
extern const char *fcgi_flt_id;
@ -42,13 +40,13 @@ extern const char *fcgi_flt_id;
/* Useful macros to access per-channel values. It can be safely used inside
* filters. */
#define CHN_IDX(chn) (((chn)->flags & CF_ISRESP) == CF_ISRESP)
#define FLT_STRM_OFF(s, chn) (chn->flt.offset)
#define FLT_STRM_OFF(s, chn) (strm_flt(s)->offset[CHN_IDX(chn)])
#define FLT_OFF(flt, chn) ((flt)->offset[CHN_IDX(chn)])
#define HAS_FILTERS(strm) ((strm)->strm_flt.flags & STRM_FLT_FL_HAS_FILTERS)
#define HAS_REQ_DATA_FILTERS(strm) ((strm)->req.flt.nb_data_filters != 0)
#define HAS_RSP_DATA_FILTERS(strm) ((strm)->res.flt.nb_data_filters != 0)
#define HAS_REQ_DATA_FILTERS(strm) ((strm)->strm_flt.nb_req_data_filters != 0)
#define HAS_RSP_DATA_FILTERS(strm) ((strm)->strm_flt.nb_rsp_data_filters != 0)
#define HAS_DATA_FILTERS(strm, chn) (((chn)->flags & CF_ISRESP) ? HAS_RSP_DATA_FILTERS(strm) : HAS_REQ_DATA_FILTERS(strm))
#define IS_REQ_DATA_FILTER(flt) ((flt)->flags & FLT_FL_IS_REQ_DATA_FILTER)
@ -139,11 +137,14 @@ static inline void
register_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
{
if (!IS_DATA_FILTER(filter, chn)) {
if (chn->flags & CF_ISRESP)
if (chn->flags & CF_ISRESP) {
filter->flags |= FLT_FL_IS_RSP_DATA_FILTER;
else
strm_flt(s)->nb_rsp_data_filters++;
}
else {
filter->flags |= FLT_FL_IS_REQ_DATA_FILTER;
chn->flt.nb_data_filters++;
strm_flt(s)->nb_req_data_filters++;
}
}
}
@ -152,66 +153,18 @@ static inline void
unregister_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
{
if (IS_DATA_FILTER(filter, chn)) {
if (chn->flags & CF_ISRESP)
if (chn->flags & CF_ISRESP) {
filter->flags &= ~FLT_FL_IS_RSP_DATA_FILTER;
else
strm_flt(s)->nb_rsp_data_filters--;
}
else {
filter->flags &= ~FLT_FL_IS_REQ_DATA_FILTER;
chn->flt.nb_data_filters--;
strm_flt(s)->nb_req_data_filters--;
}
}
}
/*
* flt_list_start() and flt_list_next() can be used to iterate over the list of filters
* for a given <strm> and <chn> combination. It will automatically choose the proper
* list to iterate from depending on the context.
*
* flt_list_start() has to be called exactly once to get the first value from the list
* to get the following values, use flt_list_next() until NULL is returned.
*
* Example:
*
* struct filter *filter;
*
* for (filter = flt_list_start(stream, channel); filter;
* filter = flt_list_next(stream, channel, filter)) {
* ...
* }
*/
/* Returns the first filter attached to channel <chn> of stream <strm>, or
 * NULL if the channel's filter list is empty. Response channels are walked
 * through the res_list linkage, request channels through req_list. <strm> is
 * not dereferenced here but kept for API symmetry with flt_list_next().
 */
static inline struct filter *flt_list_start(struct stream *strm, struct channel *chn)
{
	struct filter *filter;

	if (chn->flags & CF_ISRESP) {
		filter = LIST_NEXT(&chn->flt.filters, struct filter *, res_list);
		if (&filter->res_list == &chn->flt.filters)
			filter = NULL; /* empty list */
	}
	else {
		filter = LIST_NEXT(&chn->flt.filters, struct filter *, req_list);
		if (&filter->req_list == &chn->flt.filters)
			filter = NULL; /* empty list */
	}
	return filter;
}
/* Returns the filter following <filter> in the list attached to channel <chn>
 * of stream <strm>, or NULL once the end of the list is reached. Must only be
 * called with a non-NULL <filter> previously obtained from flt_list_start()
 * or from this function. <strm> is not dereferenced here but kept for API
 * symmetry with flt_list_start().
 */
static inline struct filter *flt_list_next(struct stream *strm, struct channel *chn,
					   struct filter *filter)
{
	if (chn->flags & CF_ISRESP) {
		filter = LIST_NEXT(&filter->res_list, struct filter *, res_list);
		if (&filter->res_list == &chn->flt.filters)
			filter = NULL; /* end of list */
	}
	else {
		filter = LIST_NEXT(&filter->req_list, struct filter *, req_list);
		if (&filter->req_list == &chn->flt.filters)
			filter = NULL; /* end of list */
	}
	return filter;
}
/* This function must be called when a filter alter payload data. It updates
* offsets of all previous filters. Do not call this function when a filter
* change the size of payload data leads to an undefined behavior.
@ -224,8 +177,7 @@ flt_update_offsets(struct filter *filter, struct channel *chn, int len)
struct stream *s = chn_strm(chn);
struct filter *f;
for (f = flt_list_start(s, chn); f;
f = flt_list_next(s, chn, f)) {
list_for_each_entry(f, &strm_flt(s)->filters, list) {
if (f == filter)
break;
FLT_OFF(f, chn) += len;

View File

@ -403,25 +403,6 @@ static inline uint swrate_add_scaled_opportunistic(uint *sum, uint n, uint v, ui
return new_sum;
}
/* Like swrate_add() except that if <v> is beyond the current average, the
 * average is replaced by the peak. This is essentially used to measure peak
 * loads in the scheduler, reason why it is provided as a local variant that
 * does not involve atomic operations. <sum> is the running sum over a window
 * of <n> samples; the updated sum is stored back and also returned.
 */
static inline uint swrate_add_peak_local(uint *sum, uint n, uint v)
{
	uint prev = *sum;
	uint scaled = v * n;
	uint next;

	/* jump straight to the peak when the new sample exceeds the average,
	 * otherwise decay the old sum by one average sample and add <v>.
	 */
	next = (scaled > prev) ? scaled : prev - (prev + n - 1) / n + v;

	*sum = next;
	return next;
}
/* Returns the average sample value for the sum <sum> over a sliding window of
* <n> samples. Better if <n> is a power of two. It must be the same <n> as the
* one used above in all additions.

View File

@ -79,14 +79,13 @@
#define GTUNE_DISABLE_H2_WEBSOCKET (1<<21)
#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
#define GTUNE_QUICK_EXIT (1<<23)
#define GTUNE_COLLECT_LIBS (1<<24)
/* (1<<24) unused */
/* (1<<25) unused */
#define GTUNE_USE_FAST_FWD (1<<26)
#define GTUNE_LISTENER_MQ_FAIR (1<<27)
#define GTUNE_LISTENER_MQ_OPT (1<<28)
#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
#define GTUNE_NO_KTLS (1<<29)
#define GTUNE_NO_MAX_COUNTER (1<<30)
/* subsystem-specific debugging options for tune.debug */
#define GDBG_CPU_AFFINITY (1U<< 0)
@ -180,7 +179,6 @@ struct global {
uint recv_enough; /* how many input bytes at once are "enough" */
uint bufsize; /* buffer size in bytes, defaults to BUFSIZE */
uint bufsize_small;/* small buffer size in bytes */
uint bufsize_large;/* large buffer size in bytes */
int maxrewrite; /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
int reserved_bufs; /* how many buffers can only be allocated for response */
int buf_limit; /* if not null, how many total buffers may only be allocated */
@ -215,7 +213,6 @@ struct global {
int default_shards; /* default shards for listeners, or -1 (by-thread) or -2 (by-group) */
uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
uint ring_queues; /* if >0, #ring queues, otherwise equals #thread groups */
uint cli_max_payload_sz; /* The max payload size for the CLI */
enum threadgroup_takeover tg_takeover; /* Policy for threadgroup takeover */
} tune;
struct {

View File

@ -24,7 +24,6 @@
#include <haproxy/api-t.h>
#include <haproxy/global-t.h>
#include <haproxy/cfgparse.h>
extern struct global global;
extern int pid; /* current process id */
@ -55,12 +54,6 @@ extern char **old_argv;
extern const char *old_unixsocket;
extern int daemon_fd[2];
extern int devnullfd;
extern int fileless_mode;
extern struct cfgfile fileless_cfg;
/* storage for collected libs */
extern void *lib_storage;
extern size_t lib_size;
struct proxy;
struct server;

View File

@ -99,7 +99,7 @@ enum h1m_state {
#define H1_MF_TE_CHUNKED 0x00010000 // T-E "chunked"
#define H1_MF_TE_OTHER 0x00020000 // T-E other than supported ones found (only "chunked" is supported for now)
#define H1_MF_UPG_H2C 0x00040000 // "h2c" or "h2" used as upgrade token
#define H1_MF_NOT_HTTP 0x00080000 // Not an HTTP message (e.g "RTSP", only possible if invalid message are accepted)
/* Mask to use to reset H1M flags when we restart headers parsing.
*
* WARNING: Don't forget to update it if a new flag must be preserved when
@ -263,8 +263,6 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
const char *ptr_old = ptr;
const char *end = b_wrap(buf);
uint64_t chunk = 0;
int backslash = 0;
int quote = 0;
stop -= start; // bytes left
start = stop; // bytes to transfer
@ -329,37 +327,13 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
if (--stop == 0)
return 0;
/* The loop seeks the first CRLF or non-tab CTL char
* and stops there. If a backslash/quote is active,
* it's an error. If none, we assume it's the CRLF
* and go back to the top of the loop checking for
* CR then LF. This way CTLs, lone LF etc are handled
* in the fallback path. This allows to protect
* remotes against their own possibly non-compliant
* chunk-ext parser which could mistakenly skip a
* quoted CRLF. Chunk-ext are not used anyway, except
* by attacks.
*/
while (!HTTP_IS_CTL(*ptr) || HTTP_IS_SPHT(*ptr)) {
if (backslash)
backslash = 0; // escaped char
else if (*ptr == '\\' && quote)
backslash = 1;
else if (*ptr == '\\') // backslash not permitted outside quotes
goto error;
else if (*ptr == '"') // begin/end of quoted-pair
quote = !quote;
while (!HTTP_IS_CRLF(*ptr)) {
if (++ptr >= end)
ptr = b_orig(buf);
if (--stop == 0)
return 0;
}
/* mismatched quotes / backslashes end here */
if (quote || backslash)
goto error;
/* CTLs (CRLF) fall to the common check */
/* we have a CRLF now, loop above */
continue;
}
else

View File

@ -222,7 +222,6 @@ struct hlua_proxy_list {
};
struct hlua_proxy_list_iterator_context {
struct watcher px_watch; /* watcher to automatically update next pointer on backend deletion */
struct proxy *next;
char capabilities;
};

View File

@ -164,7 +164,7 @@ static inline int hpack_encode_int_status(struct buffer *out, unsigned int statu
goto fail;
/* basic encoding of the status code */
out->area[len - 5] = 0x48; // literal with incremental indexing, name=":status" (idx 8)
out->area[len - 5] = 0x48; // indexed name -- name=":status" (idx 8)
out->area[len - 4] = 0x03; // 3 bytes status
out->area[len - 3] = '0' + status / 100;
out->area[len - 2] = '0' + status / 10 % 10;

View File

@ -78,7 +78,7 @@ static inline const struct hpack_dte *hpack_get_dte(const struct hpack_dht *dht,
/* returns non-zero if <idx> is valid for table <dht>, i.e. it falls within
 * the combined static + dynamic index address space.
 */
static inline int hpack_valid_idx(const struct hpack_dht *dht, uint32_t idx)
{
	return idx > 0 && idx < dht->used + HPACK_SHT_SIZE;
	/* NOTE(review): the statement below is unreachable; the two consecutive
	 * returns look like old and new versions of this check rendered together
	 * by the diff (they differ only in rejecting idx == 0). Confirm which
	 * one the real tree contains.
	 */
	return idx < dht->used + HPACK_SHT_SIZE;
}
/* return a pointer to the header name for entry <dte>. */

View File

@ -1,32 +0,0 @@
#ifndef _HAPROXY_HSTREAM_T_H
#define _HAPROXY_HSTREAM_T_H
#include <haproxy/dynbuf-t.h>
#include <haproxy/http-t.h>
#include <haproxy/obj_type-t.h>
/* hastream stream: per-stream state for the internal "hastream" HTTP
 * applet-like endpoint. Field meanings below are partly inferred from names
 * and existing comments — confirm against the implementation.
 */
struct hstream {
	enum obj_type obj_type;      /* object type marker, must be first for obj_type casts — TODO confirm */
	struct session *sess;        /* owning session */
	struct stconn *sc;           /* stream connector attached to this hstream */
	struct task *task;           /* processing task for this stream */
	struct buffer req;           /* request-side buffer */
	struct buffer res;           /* response-side buffer */
	unsigned long long to_write; /* #of response data bytes to write after headers */
	struct buffer_wait buf_wait; /* Wait list for buffer allocation */
	int flags;                   /* state flags — values defined elsewhere */
	int ka; /* .0: keep-alive .1: forced .2: http/1.1, .3: was_reused */
	unsigned long long req_size; /* values passed in the URI to override the server's */
	unsigned long long req_body; /* remaining body to be consumed from the request */
	int req_code;                /* presumably the status code requested via the URI — verify */
	int res_wait; /* time to wait before replying in ms */
	int res_time;                /* presumably response pacing time in ms — verify against res_wait usage */
	enum http_meth_t req_meth;   /* request HTTP method */
};
#endif /* _HAPROXY_HSTREAM_T_H */

Some files were not shown because too many files have changed in this diff Show More