Compare commits

..

No commits in common. "master" and "v3.0-dev10" have entirely different histories.

960 changed files with 36885 additions and 109284 deletions

View File

@ -1,15 +1,15 @@
FreeBSD_task: FreeBSD_task:
freebsd_instance: freebsd_instance:
matrix: matrix:
image_family: freebsd-14-3 image_family: freebsd-13-2
only_if: $CIRRUS_BRANCH =~ 'master|next' only_if: $CIRRUS_BRANCH =~ 'master|next'
install_script: install_script:
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2 - pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua53 socat pcre
script: script:
- sudo sysctl kern.corefile=/tmp/%N.%P.core - sudo sysctl kern.corefile=/tmp/%N.%P.core
- sudo sysctl kern.sugid_coredump=1 - sudo sysctl kern.sugid_coredump=1
- scripts/build-vtest.sh - scripts/build-vtest.sh
- gmake CC=clang V=1 ERR=1 TARGET=freebsd USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_OPENSSL=1 USE_LUA=1 LUA_INC=/usr/local/include/lua54 LUA_LIB=/usr/local/lib LUA_LIB_NAME=lua-5.4 - gmake CC=clang V=1 ERR=1 TARGET=freebsd USE_ZLIB=1 USE_PCRE=1 USE_OPENSSL=1 USE_LUA=1 LUA_INC=/usr/local/include/lua53 LUA_LIB=/usr/local/lib LUA_LIB_NAME=lua-5.3
- ./haproxy -vv - ./haproxy -vv
- ldd haproxy - ldd haproxy
test_script: test_script:

View File

@ -1,34 +0,0 @@
name: 'setup VTest'
description: 'ssss'
runs:
using: "composite"
steps:
- name: Setup coredumps
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
shell: bash
run: |
sudo sysctl -w fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
- name: Setup ulimit for core dumps
shell: bash
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
ulimit -c unlimited
- name: Install VTest
shell: bash
run: |
scripts/build-vtest.sh
- name: Install problem matcher for VTest
shell: bash
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"

View File

@ -19,9 +19,9 @@ defaults
frontend h2 frontend h2
mode http mode http
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1 bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
default_backend h2b default_backend h2
backend h2b backend h2
errorfile 200 .github/errorfile errorfile 200 .github/errorfile
http-request deny deny_status 200 http-request deny deny_status 200

77
.github/matrix.py vendored
View File

@ -67,37 +67,6 @@ def determine_latest_aws_lc(ssl):
latest_tag = max(valid_tags, key=aws_lc_version_string_to_num) latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
return "AWS_LC_VERSION={}".format(latest_tag[1:]) return "AWS_LC_VERSION={}".format(latest_tag[1:])
def aws_lc_fips_version_string_to_num(version_string):
return tuple(map(int, version_string[12:].split('.')))
def aws_lc_fips_version_valid(version_string):
return re.match('^AWS-LC-FIPS-[0-9]+(\.[0-9]+)*$', version_string)
@functools.lru_cache(5)
def determine_latest_aws_lc_fips(ssl):
# the AWS-LC-FIPS tags are at the end of the list, so let's get a lot
tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags?per_page=200")
if not tags:
return "AWS_LC_FIPS_VERSION=failed_to_detect"
valid_tags = list(filter(aws_lc_fips_version_valid, tags))
latest_tag = max(valid_tags, key=aws_lc_fips_version_string_to_num)
return "AWS_LC_FIPS_VERSION={}".format(latest_tag[12:])
def wolfssl_version_string_to_num(version_string):
return tuple(map(int, version_string[1:].removesuffix('-stable').split('.')))
def wolfssl_version_valid(version_string):
return re.match('^v[0-9]+(\.[0-9]+)*-stable$', version_string)
@functools.lru_cache(5)
def determine_latest_wolfssl(ssl):
tags = get_all_github_tags("https://api.github.com/repos/wolfssl/wolfssl/tags")
if not tags:
return "WOLFSSL_VERSION=failed_to_detect"
valid_tags = list(filter(wolfssl_version_valid, tags))
latest_tag = max(valid_tags, key=wolfssl_version_string_to_num)
return "WOLFSSL_VERSION={}".format(latest_tag[1:].removesuffix('-stable'))
@functools.lru_cache(5) @functools.lru_cache(5)
def determine_latest_libressl(ssl): def determine_latest_libressl(ssl):
try: try:
@ -125,11 +94,9 @@ def main(ref_name):
# Ubuntu # Ubuntu
if "haproxy-" in ref_name: if "haproxy-" in ref_name:
os = "ubuntu-24.04" # stable branch os = "ubuntu-22.04" # stable branch
os_arm = "ubuntu-24.04-arm" # stable branch
else: else:
os = "ubuntu-24.04" # development branch os = "ubuntu-latest" # development branch
os_arm = "ubuntu-24.04-arm" # development branch
TARGET = "linux-glibc" TARGET = "linux-glibc"
for CC in ["gcc", "clang"]: for CC in ["gcc", "clang"]:
@ -156,10 +123,11 @@ def main(ref_name):
"OT_INC=${HOME}/opt-ot/include", "OT_INC=${HOME}/opt-ot/include",
"OT_LIB=${HOME}/opt-ot/lib", "OT_LIB=${HOME}/opt-ot/lib",
"OT_RUNPATH=1", "OT_RUNPATH=1",
"USE_PCRE2=1", "USE_PCRE=1",
"USE_PCRE2_JIT=1", "USE_PCRE_JIT=1",
"USE_LUA=1", "USE_LUA=1",
"USE_OPENSSL=1", "USE_OPENSSL=1",
"USE_SYSTEMD=1",
"USE_WURFL=1", "USE_WURFL=1",
"WURFL_INC=addons/wurfl/dummy", "WURFL_INC=addons/wurfl/dummy",
"WURFL_LIB=addons/wurfl/dummy", "WURFL_LIB=addons/wurfl/dummy",
@ -174,11 +142,10 @@ def main(ref_name):
# ASAN # ASAN
for os_asan in [os, os_arm]:
matrix.append( matrix.append(
{ {
"name": "{}, {}, ASAN, all features".format(os_asan, CC), "name": "{}, {}, ASAN, all features".format(os, CC),
"os": os_asan, "os": os,
"TARGET": TARGET, "TARGET": TARGET,
"CC": CC, "CC": CC,
"FLAGS": [ "FLAGS": [
@ -190,10 +157,11 @@ def main(ref_name):
"OT_INC=${HOME}/opt-ot/include", "OT_INC=${HOME}/opt-ot/include",
"OT_LIB=${HOME}/opt-ot/lib", "OT_LIB=${HOME}/opt-ot/lib",
"OT_RUNPATH=1", "OT_RUNPATH=1",
"USE_PCRE2=1", "USE_PCRE=1",
"USE_PCRE2_JIT=1", "USE_PCRE_JIT=1",
"USE_LUA=1", "USE_LUA=1",
"USE_OPENSSL=1", "USE_OPENSSL=1",
"USE_SYSTEMD=1",
"USE_WURFL=1", "USE_WURFL=1",
"WURFL_INC=addons/wurfl/dummy", "WURFL_INC=addons/wurfl/dummy",
"WURFL_LIB=addons/wurfl/dummy", "WURFL_LIB=addons/wurfl/dummy",
@ -221,10 +189,9 @@ def main(ref_name):
"stock", "stock",
"OPENSSL_VERSION=1.0.2u", "OPENSSL_VERSION=1.0.2u",
"OPENSSL_VERSION=1.1.1s", "OPENSSL_VERSION=1.1.1s",
"OPENSSL_VERSION=3.5.1",
"QUICTLS=yes", "QUICTLS=yes",
"WOLFSSL_VERSION=5.7.0", "WOLFSSL_VERSION=5.6.6",
"AWS_LC_VERSION=1.39.0", "AWS_LC_VERSION=1.16.0",
# "BORINGSSL=yes", # "BORINGSSL=yes",
] ]
@ -236,7 +203,8 @@ def main(ref_name):
for ssl in ssl_versions: for ssl in ssl_versions:
flags = ["USE_OPENSSL=1"] flags = ["USE_OPENSSL=1"]
skipdup=0 if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
flags.append("USE_QUIC=1")
if "WOLFSSL" in ssl: if "WOLFSSL" in ssl:
flags.append("USE_OPENSSL_WOLFSSL=1") flags.append("USE_OPENSSL_WOLFSSL=1")
if "AWS_LC" in ssl: if "AWS_LC" in ssl:
@ -246,23 +214,8 @@ def main(ref_name):
flags.append("SSL_INC=${HOME}/opt/include") flags.append("SSL_INC=${HOME}/opt/include")
if "LIBRESSL" in ssl and "latest" in ssl: if "LIBRESSL" in ssl and "latest" in ssl:
ssl = determine_latest_libressl(ssl) ssl = determine_latest_libressl(ssl)
skipdup=1
if "OPENSSL" in ssl and "latest" in ssl: if "OPENSSL" in ssl and "latest" in ssl:
ssl = determine_latest_openssl(ssl) ssl = determine_latest_openssl(ssl)
skipdup=1
# if "latest" equals a version already in the list
if ssl in ssl_versions and skipdup == 1:
continue
openssl_supports_quic = False
try:
openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
except:
pass
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
flags.append("USE_QUIC=1")
matrix.append( matrix.append(
{ {
@ -280,7 +233,7 @@ def main(ref_name):
if "haproxy-" in ref_name: if "haproxy-" in ref_name:
os = "macos-13" # stable branch os = "macos-13" # stable branch
else: else:
os = "macos-26" # development branch os = "macos-14" # development branch
TARGET = "osx" TARGET = "osx"
for CC in ["clang"]: for CC in ["clang"]:

View File

@ -1,12 +0,0 @@
name: AWS-LC-FIPS
on:
schedule:
- cron: "0 0 * * 4"
workflow_dispatch:
jobs:
test:
uses: ./.github/workflows/aws-lc-template.yml
with:
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"

View File

@ -1,94 +0,0 @@
name: AWS-LC template
on:
workflow_call:
inputs:
command:
required: true
type: string
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "${{ inputs.command }}")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Run VTest for HAProxy
id: vtest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

View File

@ -5,8 +5,62 @@ on:
- cron: "0 0 * * 4" - cron: "0 0 * * 4"
workflow_dispatch: workflow_dispatch:
permissions:
contents: read
jobs: jobs:
test: test:
uses: ./.github/workflows/aws-lc-template.yml runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with: with:
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))" path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1

View File

@ -3,7 +3,6 @@ name: Spelling Check
on: on:
schedule: schedule:
- cron: "0 0 * * 2" - cron: "0 0 * * 2"
workflow_dispatch:
permissions: permissions:
contents: read contents: read
@ -11,12 +10,12 @@ permissions:
jobs: jobs:
codespell: codespell:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }} if: ${{ github.repository_owner == 'haproxy' }}
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- uses: codespell-project/codespell-problem-matcher@v1.2.0 - uses: codespell-project/codespell-problem-matcher@v1
- uses: codespell-project/actions-codespell@master - uses: codespell-project/actions-codespell@master
with: with:
skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
ignore_words_list: pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
uri_ignore_words_list: trafic,ressources uri_ignore_words_list: trafic,ressources

View File

@ -11,10 +11,15 @@ permissions:
jobs: jobs:
h2spec: h2spec:
name: h2spec name: h2spec
runs-on: ubuntu-latest runs-on: ${{ matrix.os }}
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }} strategy:
matrix:
include:
- TARGET: linux-glibc
CC: gcc
os: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Install h2spec - name: Install h2spec
id: install-h2spec id: install-h2spec
run: | run: |
@ -23,12 +28,12 @@ jobs:
tar xvf h2spec.tar.gz tar xvf h2spec.tar.gz
sudo install -m755 h2spec /usr/local/bin/h2spec sudo install -m755 h2spec /usr/local/bin/h2spec
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
- name: Compile HAProxy with gcc - name: Compile HAProxy with ${{ matrix.CC }}
run: | run: |
make -j$(nproc) all \ make -j$(nproc) all \
ERR=1 \ ERR=1 \
TARGET=linux-glibc \ TARGET=${{ matrix.TARGET }} \
CC=gcc \ CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY" \ DEBUG="-DDEBUG_POOL_INTEGRITY" \
USE_OPENSSL=1 USE_OPENSSL=1
sudo make install sudo make install

View File

@ -10,7 +10,7 @@ jobs:
build: build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Compile admin/halog/halog - name: Compile admin/halog/halog
run: | run: |
make admin/halog/halog make admin/halog/halog

View File

@ -15,15 +15,14 @@ permissions:
jobs: jobs:
scan: scan:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }} if: ${{ github.repository_owner == 'haproxy' }}
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Install apt dependencies - name: Install apt dependencies
run: | run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none sudo apt-get update
sudo apt-get --no-install-recommends -y install \ sudo apt-get install -y \
liblua5.4-dev \ liblua5.3-dev \
libpcre2-dev \
libsystemd-dev libsystemd-dev
- name: Install QUICTLS - name: Install QUICTLS
run: | run: |
@ -38,7 +37,7 @@ jobs:
- name: Build with Coverity build tool - name: Build with Coverity build tool
run: | run: |
export PATH=`pwd`/coverity_tool/bin:$PATH export PATH=`pwd`/coverity_tool/bin:$PATH
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1 cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_SYSTEMD=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
- name: Submit build result to Coverity Scan - name: Submit build result to Coverity Scan
run: | run: |
tar czvf cov.tar.gz cov-int tar czvf cov.tar.gz cov-int

View File

@ -6,7 +6,6 @@ name: Cross Compile
on: on:
schedule: schedule:
- cron: "0 0 21 * *" - cron: "0 0 21 * *"
workflow_dispatch:
permissions: permissions:
contents: read contents: read
@ -91,15 +90,15 @@ jobs:
} }
] ]
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }} if: ${{ github.repository_owner == 'haproxy' }}
steps: steps:
- name: install packages - name: install packages
run: | run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none sudo apt-get update
sudo apt-get -yq --force-yes install \ sudo apt-get -yq --force-yes install \
gcc-${{ matrix.platform.arch }} \ gcc-${{ matrix.platform.arch }} \
${{ matrix.platform.libs }} ${{ matrix.platform.libs }}
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: install quictls - name: install quictls

View File

@ -3,7 +3,6 @@ name: Fedora/Rawhide/QuicTLS
on: on:
schedule: schedule:
- cron: "0 0 25 * *" - cron: "0 0 25 * *"
workflow_dispatch:
permissions: permissions:
contents: read contents: read
@ -18,19 +17,19 @@ jobs:
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }, { name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" } { name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
] ]
fail-fast: false
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }} name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }} if: ${{ github.repository_owner == 'haproxy' }}
container: container:
image: fedora:rawhide image: fedora:rawhide
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Install dependencies - name: Install dependencies
run: | run: |
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang dnf -y install diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
- uses: ./.github/actions/setup-vtest - name: Install VTest
run: scripts/build-vtest.sh
- name: Install QuicTLS - name: Install QuicTLS
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
- name: Build contrib tools - name: Build contrib tools
@ -41,7 +40,7 @@ jobs:
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
- name: Compile HAProxy with ${{ matrix.platform.cc }} - name: Compile HAProxy with ${{ matrix.platform.cc }}
run: | run: |
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}" make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_SYSTEMD=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
make install make install
- name: Show HAProxy version - name: Show HAProxy version
id: show-version id: show-version
@ -58,13 +57,9 @@ jobs:
- name: Show VTest results - name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }} if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: | run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do for folder in ${TMPDIR}/haregtests-*/vtc.*; do
printf "::group::" printf "::group::"
cat $folder/INFO cat $folder/INFO
cat $folder/LOG cat $folder/LOG
echo "::endgroup::" echo "::endgroup::"
done done
- name: Run Unit tests
id: unittests
run: |
make unit-tests

View File

@ -1,24 +0,0 @@
name: Illumos
on:
schedule:
- cron: "0 0 25 * *"
workflow_dispatch:
jobs:
gcc:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
permissions:
contents: read
steps:
- name: "Checkout repository"
uses: actions/checkout@v5
- name: "Build on VM"
uses: vmactions/solaris-vm@v1
with:
prepare: |
pkg install gcc make
run: |
gmake CC=gcc TARGET=solaris USE_OPENSSL=1 USE_PROMEX=1

View File

@ -20,13 +20,13 @@ jobs:
run: | run: |
ulimit -c unlimited ulimit -c unlimited
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Install dependencies - name: Install dependencies
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
- name: Install VTest - name: Install VTest
run: scripts/build-vtest.sh run: scripts/build-vtest.sh
- name: Build - name: Build
run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1 run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
- name: Show version - name: Show version
run: ./haproxy -vv run: ./haproxy -vv
- name: Show linked libraries - name: Show linked libraries
@ -37,10 +37,6 @@ jobs:
- name: Run VTest - name: Run VTest
id: vtest id: vtest
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps - name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }} if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: | run: |
@ -64,13 +60,3 @@ jobs:
cat $folder/LOG cat $folder/LOG
echo "::endgroup::" echo "::endgroup::"
done done
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

View File

@ -3,17 +3,15 @@ name: NetBSD
on: on:
schedule: schedule:
- cron: "0 0 25 * *" - cron: "0 0 25 * *"
workflow_dispatch:
jobs: jobs:
gcc: gcc:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
permissions: permissions:
contents: read contents: read
steps: steps:
- name: "Checkout repository" - name: "Checkout repository"
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: "Build on VM" - name: "Build on VM"
uses: vmactions/netbsd-vm@v1 uses: vmactions/netbsd-vm@v1
@ -21,4 +19,4 @@ jobs:
prepare: | prepare: |
/usr/sbin/pkg_add gmake curl /usr/sbin/pkg_add gmake curl
run: | run: |
gmake CC=gcc TARGET=netbsd ERR=1 USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1 USE_ZLIB=1 gmake CC=gcc TARGET=netbsd USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1 USE_ZLIB=1

View File

@ -1,82 +0,0 @@
name: openssl ECH
on:
schedule:
- cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- name: Install OpenSSL+ECH
run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Install curl+ECH
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
ARCH_FLAGS="-ggdb3 -fsanitize=address"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

View File

@ -1,77 +0,0 @@
name: openssl master
on:
schedule:
- cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- uses: ./.github/actions/setup-vtest
- name: Install OpenSSL master
run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

View File

@ -0,0 +1,33 @@
#
# special purpose CI: test against OpenSSL built in "no-deprecated" mode
# let us run those builds weekly
#
# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
#
#
# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
# GH: https://github.com/haproxy/haproxy/issues/367
name: openssl no-deprecated
on:
schedule:
- cron: "0 0 * * 4"
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Compile HAProxy
run: |
make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
- name: Run VTest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel

View File

@ -1,104 +0,0 @@
#
# QUIC interoperability testing with haproxy built against AWS-LC.
# goodput,crosstraffic are not run on purpose: those tests are intended
# for bandwidth measurement, and we currently do not want to use GitHub
# runners for that.
#
name: QUIC Interop AWS-LC

on:
  workflow_dispatch:
  schedule:
    # weekly, Tuesday at midnight UTC
    - cron: "0 0 * * 2"

jobs:
  # Build the haproxy-qns interop image linked against AWS-LC and push it
  # to the GitHub container registry for the "run" job below.
  build:
    runs-on: ubuntu-24.04
    # scheduled runs only happen on the upstream repo; forks trigger manually
    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
    permissions:
      contents: read
      packages: write   # needed to push/delete images in ghcr.io
    steps:
      - uses: actions/checkout@v5
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Docker image
        id: push
        uses: docker/build-push-action@v5
        with:
          # the Dockerfile comes straight from the haproxy-qns repository
          context: https://github.com/haproxytech/haproxy-qns.git
          push: true
          build-args: |
            SSLLIB=AWS-LC
          tags: ghcr.io/${{ github.repository }}:aws-lc
      # keep the package registry small: drop old untagged image versions
      - name: Cleanup registry
        uses: actions/delete-package-versions@v5
        with:
          owner: ${{ github.repository_owner }}
          package-name: 'haproxy'
          package-type: container
          min-versions-to-keep: 1
          delete-only-untagged-versions: 'true'

  # Run the quic-interop-runner suites with several QUIC clients against
  # the image built above, haproxy acting as the server.
  run:
    needs: build
    strategy:
      matrix:
        suite: [
          { client: chrome, tests: "http3" },
          { client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
          { client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
          { client: ngtcp2, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" }
        ]
      # let the other clients finish even if one of them fails
      fail-fast: false
    name: ${{ matrix.suite.client }}
    runs-on: ubuntu-24.04
    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
    steps:
      - uses: actions/checkout@v5
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # tshark is used by the interop runner to capture/decode QUIC traffic
      - name: Install tshark
        run: |
          sudo apt-get update
          sudo apt-get -y install tshark
      - name: Pull image
        run: |
          docker pull ghcr.io/${{ github.repository }}:aws-lc
      - name: Run
        run: |
          git clone https://github.com/quic-interop/quic-interop-runner
          cd quic-interop-runner
          pip install -r requirements.txt --break-system-packages
          python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:aws-lc -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
      # keep only the logs of failed tests to reduce the artifact size
      - name: Delete succeeded logs
        if: failure()
        run: |
          cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
          cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
      - name: Logs upload
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: logs-${{ matrix.suite.client }}
          path: quic-interop-runner/logs/
          retention-days: 6

View File

@ -1,102 +0,0 @@
#
# QUIC interoperability testing with haproxy built against LibreSSL.
# goodput,crosstraffic are not run on purpose: those tests are intended
# for bandwidth measurement, and we currently do not want to use GitHub
# runners for that.
#
name: QUIC Interop LibreSSL

on:
  workflow_dispatch:
  schedule:
    # weekly, Tuesday at midnight UTC
    - cron: "0 0 * * 2"

jobs:
  # Build the haproxy-qns interop image linked against LibreSSL and push
  # it to the GitHub container registry for the "run" job below.
  build:
    runs-on: ubuntu-24.04
    # scheduled runs only happen on the upstream repo; forks trigger manually
    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
    permissions:
      contents: read
      packages: write   # needed to push/delete images in ghcr.io
    steps:
      - uses: actions/checkout@v5
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Docker image
        id: push
        uses: docker/build-push-action@v5
        with:
          # the Dockerfile comes straight from the haproxy-qns repository
          context: https://github.com/haproxytech/haproxy-qns.git
          push: true
          build-args: |
            SSLLIB=LibreSSL
          tags: ghcr.io/${{ github.repository }}:libressl
      # keep the package registry small: drop old untagged image versions
      - name: Cleanup registry
        uses: actions/delete-package-versions@v5
        with:
          owner: ${{ github.repository_owner }}
          package-name: 'haproxy'
          package-type: container
          min-versions-to-keep: 1
          delete-only-untagged-versions: 'true'

  # Run the quic-interop-runner suites against the image built above,
  # haproxy acting as the server. Note: the test list is smaller than in
  # the AWS-LC workflow (no resumption/zerortt/keyupdate/ecn/ipv6 here).
  run:
    needs: build
    strategy:
      matrix:
        suite: [
          { client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
          { client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
        ]
      # let the other client finish even if one of them fails
      fail-fast: false
    name: ${{ matrix.suite.client }}
    runs-on: ubuntu-24.04
    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
    steps:
      - uses: actions/checkout@v5
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # tshark is used by the interop runner to capture/decode QUIC traffic
      - name: Install tshark
        run: |
          sudo apt-get update
          sudo apt-get -y install tshark
      - name: Pull image
        run: |
          docker pull ghcr.io/${{ github.repository }}:libressl
      - name: Run
        run: |
          git clone https://github.com/quic-interop/quic-interop-runner
          cd quic-interop-runner
          pip install -r requirements.txt --break-system-packages
          python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:libressl -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
      # keep only the logs of failed tests to reduce the artifact size
      - name: Delete succeeded logs
        if: failure()
        run: |
          cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
          cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
      - name: Logs upload
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: logs-${{ matrix.suite.client }}
          path: quic-interop-runner/logs/
          retention-days: 6

View File

@ -1,74 +0,0 @@
#
# Weekly run against the modern QuicTLS branch,
# i.e. https://github.com/quictls/quictls
#
name: QuicTLS

on:
  schedule:
    # weekly, Thursday at midnight UTC
    - cron: "0 0 * * 4"
  workflow_dispatch:

permissions:
  contents: read

jobs:
  test:
    runs-on: ubuntu-latest
    # scheduled runs only happen on the upstream repo; forks trigger manually
    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
    steps:
      - uses: actions/checkout@v5
      - name: Install apt dependencies
        run: |
          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
          sudo apt-get --no-install-recommends -y install socat gdb
      # builds QuicTLS from source; scripts/build-ssl.sh installs it under
      # $HOME/opt, matching SSL_LIB/SSL_INC used below
      - name: Install QuicTLS
        run: env QUICTLS=yes QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
      # ASAN build (ARCH_FLAGS) with debugging aids; rpath points at the
      # locally-installed QuicTLS so the binary runs without LD_LIBRARY_PATH
      - name: Compile HAProxy
        run: |
          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
            USE_QUIC=1 USE_OPENSSL=1 \
            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
            ARCH_FLAGS="-ggdb3 -fsanitize=address"
          sudo make install
      - name: Show HAProxy version
        id: show-version
        run: |
          ldd $(which haproxy)
          haproxy -vv
          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
      # composite action: coredump setup, ulimits, VTest build, log matcher
      - uses: ./.github/actions/setup-vtest
      - name: Run VTest for HAProxy
        id: vtest
        run: |
          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
      # dump INFO/LOG of the failed regtests as collapsible log groups
      - name: Show VTest results
        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
        run: |
          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
            printf "::group::"
            cat $folder/INFO
            cat $folder/LOG
            echo "::endgroup::"
          done
          exit 1
      - name: Run Unit tests
        id: unittests
        run: |
          make unit-tests
      # core files land in /tmp (see setup-vtest); print a backtrace for each
      - name: Show coredumps
        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
        run: |
          failed=false
          shopt -s nullglob
          for file in /tmp/core.*; do
            failed=true
            printf "::group::"
            gdb -ex 'thread apply all bt full' ./haproxy $file
            echo "::endgroup::"
          done
          if [ "$failed" = true ]; then
            exit 1;
          fi

View File

@ -23,7 +23,7 @@ jobs:
outputs: outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }} matrix: ${{ steps.set-matrix.outputs.matrix }}
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Generate Build Matrix - name: Generate Build Matrix
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -44,10 +44,16 @@ jobs:
TMPDIR: /tmp TMPDIR: /tmp
OT_CPP_VERSION: 1.6.0 OT_CPP_VERSION: 1.6.0
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
with: with:
fetch-depth: 100 fetch-depth: 100
- name: Setup coredumps
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
run: |
sudo sysctl -w fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
# #
# Github Action cache key cannot contain comma, so we calculate it based on job name # Github Action cache key cannot contain comma, so we calculate it based on job name
# #
@ -70,24 +76,26 @@ jobs:
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: '~/opt-ot/' path: '~/opt-ot/'
key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }} key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
- name: Install apt dependencies - name: Install apt dependencies
if: ${{ startsWith(matrix.os, 'ubuntu-') }} if: ${{ startsWith(matrix.os, 'ubuntu-') }}
run: | run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none sudo apt-get update
sudo apt-get --no-install-recommends -y install \ sudo apt-get install -y \
${{ contains(matrix.FLAGS, 'USE_LUA=1') && 'liblua5.4-dev' || '' }} \ liblua5.3-dev \
${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \ libpcre2-dev \
${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \ libsystemd-dev \
ninja-build \
socat \ socat \
gdb \ gdb
jose
- name: Install brew dependencies - name: Install brew dependencies
if: ${{ startsWith(matrix.os, 'macos-') }} if: ${{ startsWith(matrix.os, 'macos-') }}
run: | run: |
brew install socat brew install socat
brew install lua brew install lua
- uses: ./.github/actions/setup-vtest - name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install SSL ${{ matrix.ssl }} - name: Install SSL ${{ matrix.ssl }}
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }} if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ matrix.ssl }} scripts/build-ssl.sh run: env ${{ matrix.ssl }} scripts/build-ssl.sh
@ -110,19 +118,10 @@ jobs:
ERR=1 \ ERR=1 \
TARGET=${{ matrix.TARGET }} \ TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \ CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \ DEBUG="-DDEBUG_POOL_INTEGRITY" \
${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install-bin
- name: Compile admin/halog/halog
run: |
make -j$(nproc) admin/halog/halog \
ERR=1 \
TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
${{ join(matrix.FLAGS, ' ') }} \ ${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version - name: Show HAProxy version
id: show-version id: show-version
run: | run: |
@ -137,33 +136,45 @@ jobs:
echo "::endgroup::" echo "::endgroup::"
haproxy -vv haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }} - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
id: vtest id: vtest
env:
# Force ASAN output into asan.log to make the output more readable.
ASAN_OPTIONS: log_path=asan.log
run: | run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Config syntax check memleak smoke testing
if: ${{ contains(matrix.name, 'ASAN') }}
run: |
./haproxy -dI -f .github/h2spec.config -c
./haproxy -dI -f examples/content-sw-sample.cfg -c
./haproxy -dI -f examples/option-http_proxy.cfg -c
./haproxy -dI -f examples/quick-test.cfg -c
./haproxy -dI -f examples/transparent_proxy.cfg -c
- name: Show VTest results - name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }} if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: | run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do for folder in ${TMPDIR}/haregtests-*/vtc.*; do
printf "::group::" printf "::group::"
cat $folder/INFO cat $folder/INFO
cat $folder/LOG cat $folder/LOG
echo "::endgroup::" echo "::endgroup::"
done done
exit 1 shopt -s nullglob
- name: Run Unit tests for asan in asan.log*; do
id: unittests echo "::group::$asan"
run: | cat $asan
make unit-tests
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::" echo "::endgroup::"
done done
exit 1 exit 1
- name: Show coredumps - name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }} if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: | run: |

View File

@ -35,7 +35,7 @@ jobs:
- USE_THREAD=1 - USE_THREAD=1
- USE_ZLIB=1 - USE_ZLIB=1
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- uses: msys2/setup-msys2@v2 - uses: msys2/setup-msys2@v2
with: with:
install: >- install: >-

View File

@ -1,80 +0,0 @@
# Weekly build and regression test against WolfSSL (git master branch).
name: WolfSSL

on:
  schedule:
    # weekly, Thursday at midnight UTC
    - cron: "0 0 * * 4"
  workflow_dispatch:

permissions:
  contents: read

jobs:
  test:
    runs-on: ubuntu-latest
    # scheduled runs only happen on the upstream repo; forks trigger manually
    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
    steps:
      - uses: actions/checkout@v5
      - name: Install apt dependencies
        run: |
          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
          sudo apt-get --no-install-recommends -y install socat gdb jose
      # builds WolfSSL from its master branch with debug enabled;
      # scripts/build-ssl.sh installs it under $HOME/opt
      - name: Install WolfSSL
        run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
      # ASAN build (ARCH_FLAGS) with debugging aids; rpath points at the
      # locally-installed WolfSSL so the binary runs without LD_LIBRARY_PATH
      - name: Compile HAProxy
        run: |
          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
            USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
            ARCH_FLAGS="-ggdb3 -fsanitize=address"
          sudo make install
      - name: Show HAProxy version
        id: show-version
        run: |
          ldd $(which haproxy)
          haproxy -vv
          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
      # composite action: coredump setup, ulimits, VTest build, log matcher
      - uses: ./.github/actions/setup-vtest
      - name: Run VTest for HAProxy
        id: vtest
        run: |
          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
      - name: Run Unit tests
        id: unittests
        run: |
          make unit-tests
      # dump INFO/LOG of the failed regtests as collapsible log groups
      - name: Show VTest results
        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
        run: |
          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
            printf "::group::"
            cat $folder/INFO
            cat $folder/LOG
            echo "::endgroup::"
          done
          exit 1
      # core files land in /tmp (see setup-vtest); print a backtrace for each
      - name: Show coredumps
        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
        run: |
          failed=false
          shopt -s nullglob
          for file in /tmp/core.*; do
            failed=true
            printf "::group::"
            gdb -ex 'thread apply all bt full' ./haproxy $file
            echo "::endgroup::"
          done
          if [ "$failed" = true ]; then
            exit 1;
          fi
      - name: Show Unit-Tests results
        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
        run: |
          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
            printf "::group::"
            cat $result
            echo "::endgroup::"
          done
          exit 1

1
.gitignore vendored
View File

@ -57,4 +57,3 @@ dev/udp/udp-perturb
/src/dlmalloc.c /src/dlmalloc.c
/tests/test_hashes /tests/test_hashes
doc/lua-api/_build doc/lua-api/_build
dev/term_events/term_events

View File

@ -8,7 +8,7 @@ branches:
env: env:
global: global:
- FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_ZLIB=1" - FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_SYSTEMD=1 USE_ZLIB=1"
- TMPDIR=/tmp - TMPDIR=/tmp
addons: addons:

View File

@ -171,17 +171,7 @@ feedback for developers:
as the previous releases that had 6 months to stabilize. In terms of as the previous releases that had 6 months to stabilize. In terms of
stability it really means that the point zero version already accumulated stability it really means that the point zero version already accumulated
6 months of fixes and that it is much safer to use even just after it is 6 months of fixes and that it is much safer to use even just after it is
released. There is one exception though, features marked as "experimental" released.
are not guaranteed to be maintained beyond the release of the next LTS
branch. The rationale here is that the experimental status is made to
expose an early preview of a feature, that is often incomplete, not always
in its definitive form regarding configuration, and for which developers
are seeking feedback from the users. It is even possible that changes will
be brought within the stable branch and it may happen that the feature
breaks. It is not imaginable to always be able to backport bug fixes too
far in this context since the code and configuration may change quite a
bit. Users who want to try experimental features are expected to upgrade
quickly to benefit from the improvements made to that feature.
- for developers, given that the odd versions are solely used by highly - for developers, given that the odd versions are solely used by highly
skilled users, it's easier to get advanced traces and captures, and there skilled users, it's easier to get advanced traces and captures, and there

3832
CHANGELOG

File diff suppressed because it is too large Load Diff

View File

@ -1010,7 +1010,7 @@ you notice you're already practising some of them:
- continue to send pull requests after having been explained why they are not - continue to send pull requests after having been explained why they are not
welcome. welcome.
- give wrong advice to people asking for help, or sending them patches to - give wrong advices to people asking for help, or sending them patches to
try which make no sense, waste their time, and give them a bad impression try which make no sense, waste their time, and give them a bad impression
of the people working on the project. of the people working on the project.

120
INSTALL
View File

@ -9,7 +9,7 @@ used to follow updates then it is recommended that instead you use the packages
provided by your software vendor or Linux distribution. Most of them are taking provided by your software vendor or Linux distribution. Most of them are taking
this task seriously and are doing a good job at backporting important fixes. this task seriously and are doing a good job at backporting important fixes.
If for any reason you would prefer a different version than the one packaged If for any reason you'd prefer to use a different version than the one packaged
for your system, you want to be certain to have all the fixes or to get some for your system, you want to be certain to have all the fixes or to get some
commercial support, other choices are available at http://www.haproxy.com/. commercial support, other choices are available at http://www.haproxy.com/.
@ -34,26 +34,18 @@ are a few build examples :
- recent Linux system with all options, make and install : - recent Linux system with all options, make and install :
$ make clean $ make clean
$ make -j $(nproc) TARGET=linux-glibc \ $ make -j $(nproc) TARGET=linux-glibc \
USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1 \ USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1 USE_SYSTEMD=1
USE_LUA=1 USE_PCRE2=1
$ sudo make install $ sudo make install
- FreeBSD + OpenSSL, build with all options : - FreeBSD and OpenBSD, build with all options :
$ gmake -j $(sysctl -n hw.ncpu) TARGET=freebsd \ $ gmake -j 4 TARGET=freebsd USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1
USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1 \
USE_LUA=1 USE_PCRE2=1
- OpenBSD + LibreSSL, build with all options :
$ gmake -j $(sysctl -n hw.ncpu) TARGET=openbsd \
USE_OPENSSL=1 USE_QUIC=1 USE_LUA=1 USE_PCRE2=1
- embedded Linux, build using a cross-compiler : - embedded Linux, build using a cross-compiler :
$ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_PCRE2=1 \ $ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_PCRE2=1 \
CC=/opt/cross/gcc730-arm/bin/gcc CFLAGS="-mthumb" ADDLIB=-latomic CC=/opt/cross/gcc730-arm/bin/gcc ADDLIB=-latomic
- Build with static PCRE on Solaris / UltraSPARC : - Build with static PCRE on Solaris / UltraSPARC :
$ make -j $(/usr/sbin/psrinfo -p) TARGET=solaris \ $ make TARGET=solaris CPU_CFLAGS="-mcpu=v9" USE_STATIC_PCRE2=1
CPU_CFLAGS="-mcpu=v9" USE_STATIC_PCRE2=1
For more advanced build options or if a command above reports an error, please For more advanced build options or if a command above reports an error, please
read the following sections. read the following sections.
@ -111,22 +103,20 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
may want to retry with "gmake" which is the name commonly used for GNU make may want to retry with "gmake" which is the name commonly used for GNU make
on BSD systems. on BSD systems.
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to - GCC >= 4.2 (up to 13 tested). Older versions can be made to work with a
the latest mt_list update which only uses c11-like atomics. Newer versions few minor adaptations if really needed. Newer versions may sometimes break
may sometimes break due to compiler regressions or behaviour changes. The due to compiler regressions or behaviour changes. The version shipped with
version shipped with your operating system is very likely to work with no your operating system is very likely to work with no trouble. Clang >= 3.0
trouble. Clang >= 3.0 is also known to work as an alternative solution, and is also known to work as an alternative solution. Recent versions may emit
versions up to 19 were successfully tested. Recent versions may emit a bit a bit more warnings that are worth reporting as they may reveal real bugs.
more warnings that are worth reporting as they may reveal real bugs. TCC TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
(https://repo.or.cz/tinycc.git) is also usable for developers but will not not support threading and was found at least once to produce bad code in
support threading and was found at least once to produce bad code in some some rare corner cases (since fixed). But it builds extremely quickly
rare corner cases (since fixed). But it builds extremely quickly (typically (typically half a second for the whole project) and is very convenient to
half a second for the whole project) and is very convenient to run quick run quick tests during API changes or code refactoring.
tests during API changes or code refactoring.
- GNU ld (binutils package), with no particular version. Other linkers might - GNU ld (binutils package), with no particular version. Other linkers might
work but were not tested. The default one from your operating system will work but were not tested.
normally work.
On debian or Ubuntu systems and their derivatives, you may get all these tools On debian or Ubuntu systems and their derivatives, you may get all these tools
at once by issuing the two following commands : at once by issuing the two following commands :
@ -237,7 +227,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
----------------- -----------------
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
supports the OpenSSL library, and is known to build and work with branches supports the OpenSSL library, and is known to build and work with branches
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use 1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, 3.0, 3.1 and 3.2. It is recommended to use
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's, in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
and each of the branches above receives its own fixes, without forcing you to and each of the branches above receives its own fixes, without forcing you to
@ -254,20 +244,16 @@ https://github.com/openssl/openssl/issues/17627). If a migration to 3.x is
mandated by support reasons, at least 3.1 recovers a small fraction of this mandated by support reasons, at least 3.1 recovers a small fraction of this
important loss. important loss.
Three OpenSSL derivatives called LibreSSL, QUICTLS, and AWS-LC are Four OpenSSL derivatives called LibreSSL, BoringSSL, QUICTLS, and AWS-LC are
reported to work as well. While there are some efforts from the community to reported to work as well. While there are some efforts from the community to
ensure they work well, OpenSSL remains the primary target and this means that ensure they work well, OpenSSL remains the primary target and this means that
in case of conflicting choices, OpenSSL support will be favored over other in case of conflicting choices, OpenSSL support will be favored over other
options. Note that QUIC is not fully supported when haproxy is built with options. Note that QUIC is not fully supported when haproxy is built with
OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
alternatives. As of writing this, the QuicTLS project follows OpenSSL very this, the QuicTLS project follows OpenSSL very closely and provides update
closely and provides update simultaneously, but being a volunteer-driven simultaneously, but being a volunteer-driven project, its long-term future does
project, its long-term future does not look certain enough to convince not look certain enough to convince operating systems to package it, so it
operating systems to package it, so it needs to be build locally. Recent needs to be build locally. See the section about QUIC in this document.
versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
generally more performant than other OpenSSL derivatives, but may behave
slightly differently, particularly when dealing with outdated setups. See
the section about QUIC in this document.
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
supported alternative stack not based on OpenSSL, yet which implements almost supported alternative stack not based on OpenSSL, yet which implements almost
@ -326,7 +312,7 @@ command line, for example:
$ make -j $(nproc) TARGET=generic USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \ $ make -j $(nproc) TARGET=generic USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
SSL_INC=/opt/wolfssl-5.6.6/include SSL_LIB=/opt/wolfssl-5.6.6/lib SSL_INC=/opt/wolfssl-5.6.6/include SSL_LIB=/opt/wolfssl-5.6.6/lib
To use HAProxy with AWS-LC you must have version v1.22.0 or newer of AWS-LC To use HAProxy with AWS-LC you must have version v1.13.0 or newer of AWS-LC
built and installed locally. built and installed locally.
$ cd ~/build/aws-lc $ cd ~/build/aws-lc
$ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/aws-lc $ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/aws-lc
@ -389,15 +375,10 @@ systems, by passing "USE_SLZ=" to the "make" command.
Please note that SLZ will benefit from some CPU-specific instructions like the Please note that SLZ will benefit from some CPU-specific instructions like the
availability of the CRC32 extension on some ARM processors. Thus it can further availability of the CRC32 extension on some ARM processors. Thus it can further
improve its performance to build with: improve its performance to build with "CPU=native" on the target system, or
"CPU=armv81" (modern systems such as Graviton2 or A55/A75 and beyond),
- "CPU_CFLAGS=-march=native" on the target system or "CPU=a72" (e.g. for RPi4, or AWS Graviton), "CPU=a53" (e.g. for RPi3), or
- "CPU_CFLAGS=-march=armv81" on modern systems such as Graviton2 or A55/A75 "CPU=armv8-auto" (automatic detection with minor runtime penalty).
and beyond)
- "CPU_CFLAGS=-march=a72" (e.g. for RPi4, or AWS Graviton)
- "CPU_CFLAGS=-march=a53" (e.g. for RPi3)
- "CPU_CFLAGS=-march=armv8-auto" automatic detection with minor runtime
penalty)
A second option involves the widely known zlib library, which is very likely A second option involves the widely known zlib library, which is very likely
installed on your system. In order to use zlib, simply pass "USE_ZLIB=1" to the installed on your system. In order to use zlib, simply pass "USE_ZLIB=1" to the
@ -471,6 +452,12 @@ are the extra libraries that may be referenced at build time :
on Linux. It is automatically detected and may be disabled on Linux. It is automatically detected and may be disabled
using "USE_DL=", though it should never harm. using "USE_DL=", though it should never harm.
- USE_SYSTEMD=1 enables support for the sdnotify features of systemd,
allowing better integration with systemd on Linux systems
which come with it. It is never enabled by default so there
is no need to disable it.
4.10) Common errors 4.10) Common errors
------------------- -------------------
Some build errors may happen depending on the options combinations or the Some build errors may happen depending on the options combinations or the
@ -494,8 +481,8 @@ target. Common issues may include:
other supported compatible library. other supported compatible library.
- many "dereferencing pointer 'sa.985' does break strict-aliasing rules" - many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
=> these warnings happen on old compilers (typically gcc before 7.x), => these warnings happen on old compilers (typically gcc-4.4), and may
and may safely be ignored; newer ones are better on these. safely be ignored; newer ones are better on these.
4.11) QUIC 4.11) QUIC
@ -504,11 +491,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
protocol stack is currently supported as an experimental feature in haproxy on protocol stack is currently supported as an experimental feature in haproxy on
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1". the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
Note that QUIC is not always fully supported by the OpenSSL library depending on Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before cannot be supported by OpenSSL contrary to others libraries with full QUIC
3.5 contrary to others libraries with full QUIC support. The preferred option is support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its a QUIC-compatible API. Its repository is available at this location:
repository is available at this location:
https://github.com/quictls/openssl https://github.com/quictls/openssl
@ -536,18 +522,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib" LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
As last resort, haproxy may be compiled against OpenSSL as follows from 3.5 As last resort, haproxy may be compiled against OpenSSL as follows:
version with 0-RTT support:
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
or as follows for all OpenSSL versions but without O-RTT support:
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1 $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
In addition to this requirements, the QUIC listener bindings must be explicitly Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
enabled with a specific QUIC tuning parameter. (see "limited-quic" global OpenSSL. In addition to this compilation requirements, the QUIC listener
parameter of haproxy Configuration Manual). bindings must be explicitly enabled with a specific QUIC tuning parameter.
(see "limited-quic" global parameter of haproxy Configuration Manual).
5) How to build HAProxy 5) How to build HAProxy
@ -563,9 +545,9 @@ It goes into more details with the main options.
To build haproxy, you have to choose your target OS amongst the following ones To build haproxy, you have to choose your target OS amongst the following ones
and assign it to the TARGET variable : and assign it to the TARGET variable :
- linux-glibc for Linux kernel 4.17 and above - linux-glibc for Linux kernel 2.6.28 and above
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features - linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
- linux-musl for Linux kernel 4.17 and above with musl libc - linux-musl for Linux kernel 2.6.28 and above with musl libc
- solaris for Solaris 10 and above - solaris for Solaris 10 and above
- freebsd for FreeBSD 10 and above - freebsd for FreeBSD 10 and above
- dragonfly for DragonFlyBSD 4.3 and above - dragonfly for DragonFlyBSD 4.3 and above
@ -765,8 +747,8 @@ forced to produce final binaries, and must not be used during bisect sessions,
as it will often lead to the wrong commit. as it will often lead to the wrong commit.
Examples: Examples:
# silence strict-aliasing warnings with old gcc-5.5: # silence strict-aliasing warnings with old gcc-4.4:
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-55 CFLAGS=-fno-strict-aliasing $ make -j$(nproc) TARGET=linux-glibc CC=gcc-44 CFLAGS=-fno-strict-aliasing
# disable all warning options: # disable all warning options:
$ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS= $ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=

View File

@ -138,7 +138,7 @@ ScientiaMobile WURFL Device Detection
Maintainer: Paul Borile, Massimiliano Bellomi <wurfl-haproxy-support@scientiamobile.com> Maintainer: Paul Borile, Massimiliano Bellomi <wurfl-haproxy-support@scientiamobile.com>
Files: addons/wurfl, doc/WURFL-device-detection.txt Files: addons/wurfl, doc/WURFL-device-detection.txt
SPOE SPOE (deprecated)
Maintainer: Christopher Faulet <cfaulet@haproxy.com> Maintainer: Christopher Faulet <cfaulet@haproxy.com>
Files: src/flt_spoe.c, include/haproxy/spoe*.h, doc/SPOE.txt Files: src/flt_spoe.c, include/haproxy/spoe*.h, doc/SPOE.txt

203
Makefile
View File

@ -35,7 +35,6 @@
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below. # USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
# USE_OPENSSL_AWSLC : enable use of AWS-LC # USE_OPENSSL_AWSLC : enable use of AWS-LC
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API # USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
# USE_ECH : enable use of ECH with the OpenSSL API
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl) # USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features) # USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
# USE_ENGINE : enable use of OpenSSL Engine. # USE_ENGINE : enable use of OpenSSL Engine.
@ -57,14 +56,14 @@
# USE_DEVICEATLAS : enable DeviceAtlas api. # USE_DEVICEATLAS : enable DeviceAtlas api.
# USE_51DEGREES : enable third party device detection library from 51Degrees # USE_51DEGREES : enable third party device detection library from 51Degrees
# USE_WURFL : enable WURFL detection library from Scientiamobile # USE_WURFL : enable WURFL detection library from Scientiamobile
# USE_SYSTEMD : enable sd_notify() support.
# USE_OBSOLETE_LINKER : use when the linker fails to emit __start_init/__stop_init # USE_OBSOLETE_LINKER : use when the linker fails to emit __start_init/__stop_init
# USE_THREAD_DUMP : use the more advanced thread state dump system. Automatic. # USE_THREAD_DUMP : use the more advanced thread state dump system. Automatic.
# USE_OT : enable the OpenTracing filter # USE_OT : enable the OpenTracing filter
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only. # USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
# USE_LIBATOMIC : force to link with/without libatomic. Automatic. # USE_LIBATOMIC : force to link with/without libatomic. Automatic.
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours # USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
# USE_SHM_OPEN : use shm_open() for features that can make use of shared memory # USE_SHM_OPEN : use shm_open() for the startup-logs
# USE_KTLS : use kTLS.(requires at least Linux 4.17).
# #
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using # Options can be forced by specifying "USE_xxx=1" or can be disabled by using
# "USE_xxx=" (empty string). The list of enabled and disabled options for a # "USE_xxx=" (empty string). The list of enabled and disabled options for a
@ -136,12 +135,7 @@
# VTEST_PROGRAM : location of the vtest program to run reg-tests. # VTEST_PROGRAM : location of the vtest program to run reg-tests.
# DEBUG_USE_ABORT: use abort() for program termination, see include/haproxy/bug.h for details # DEBUG_USE_ABORT: use abort() for program termination, see include/haproxy/bug.h for details
#### Add -Werror when set to non-empty, and make Makefile stop on warnings.
#### It must be declared before includes because it's used there.
ERR =
include include/make/verbose.mk include include/make/verbose.mk
include include/make/errors.mk
include include/make/compiler.mk include include/make/compiler.mk
include include/make/options.mk include include/make/options.mk
@ -165,7 +159,7 @@ TARGET =
CPU = CPU =
ifneq ($(CPU),) ifneq ($(CPU),)
ifneq ($(CPU),generic) ifneq ($(CPU),generic)
$(call $(complain),the "CPU" variable was forced to "$(CPU)" but is no longer \ $(warning Warning: the "CPU" variable was forced to "$(CPU)" but is no longer \
used and will be ignored. For native builds, modern compilers generally \ used and will be ignored. For native builds, modern compilers generally \
prefer that the string "-march=native" is passed in CPU_CFLAGS or CFLAGS. \ prefer that the string "-march=native" is passed in CPU_CFLAGS or CFLAGS. \
For other CPU-specific options, please read suggestions in the INSTALL file.) For other CPU-specific options, please read suggestions in the INSTALL file.)
@ -175,7 +169,7 @@ endif
#### No longer used #### No longer used
ARCH = ARCH =
ifneq ($(ARCH),) ifneq ($(ARCH),)
$(call $(complain),the "ARCH" variable was forced to "$(ARCH)" but is no \ $(warning Warning: the "ARCH" variable was forced to "$(ARCH)" but is no \
longer used and will be ignored. Please check the INSTALL file for other \ longer used and will be ignored. Please check the INSTALL file for other \
options, but usually in order to pass arch-specific options, ARCH_FLAGS, \ options, but usually in order to pass arch-specific options, ARCH_FLAGS, \
CFLAGS or LDFLAGS are preferred.) CFLAGS or LDFLAGS are preferred.)
@ -193,7 +187,7 @@ OPT_CFLAGS = -O2
#### No longer used #### No longer used
DEBUG_CFLAGS = DEBUG_CFLAGS =
ifneq ($(DEBUG_CFLAGS),) ifneq ($(DEBUG_CFLAGS),)
$(call $(complain),DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \ $(warning Warning: DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \
longer used and will be ignored. If you have ported this build setting from \ longer used and will be ignored. If you have ported this build setting from \
and older version, it is likely that you just want to pass these options \ and older version, it is likely that you just want to pass these options \
to the CFLAGS variable. If you are passing some debugging-related options \ to the CFLAGS variable. If you are passing some debugging-related options \
@ -201,10 +195,12 @@ $(call $(complain),DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \
both the compilation and linking stages.) both the compilation and linking stages.)
endif endif
#### Add -Werror when set to non-empty
ERR =
#### May be used to force running a specific set of reg-tests #### May be used to force running a specific set of reg-tests
REG_TEST_FILES = REG_TEST_FILES =
REG_TEST_SCRIPT=./scripts/run-regtests.sh REG_TEST_SCRIPT=./scripts/run-regtests.sh
UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
#### Standard C definition #### Standard C definition
# Compiler-specific flags that may be used to set the standard behavior we # Compiler-specific flags that may be used to set the standard behavior we
@ -214,8 +210,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
# undefined behavior to silently produce invalid code. For this reason we have # undefined behavior to silently produce invalid code. For this reason we have
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior. # to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
# It is preferable not to change this option in order to avoid breakage. # It is preferable not to change this option in order to avoid breakage.
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \ STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)
$(call cc-opt,-fvect-cost-model=very-cheap)
#### Compiler-specific flags to enable certain classes of warnings. #### Compiler-specific flags to enable certain classes of warnings.
# Some are hard-coded, others are enabled only if supported. # Some are hard-coded, others are enabled only if supported.
@ -252,7 +247,7 @@ endif
#### No longer used #### No longer used
SMALL_OPTS = SMALL_OPTS =
ifneq ($(SMALL_OPTS),) ifneq ($(SMALL_OPTS),)
$(call $(complain),SMALL_OPTS was forced to "$(SMALL_OPTS)" but is no longer \ $(warning Warning: SMALL_OPTS was forced to "$(SMALL_OPTS)" but is no longer \
used and will be ignored. Please check if this setting are still relevant, \ used and will be ignored. Please check if this setting are still relevant, \
and move it either to DEFINE or to CFLAGS instead.) and move it either to DEFINE or to CFLAGS instead.)
endif endif
@ -265,9 +260,8 @@ endif
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL, # without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY, # DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK, # DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD=0-2, DEBUG_STRICT, DEBUG_DEV, # DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST, # DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST.
# DEBUG_COUNTERS=[0-2], DEBUG_STRESS, DEBUG_UNIT.
DEBUG = DEBUG =
#### Trace options #### Trace options
@ -342,16 +336,14 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \ USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \ USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \ USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
USE_ECH \
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \ USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \ USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
USE_MATH USE_DEVICEATLAS USE_51DEGREES \ USE_MATH USE_DEVICEATLAS USE_51DEGREES \
USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \ USE_WURFL USE_SYSTEMD USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \ USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
USE_MEMORY_PROFILING USE_SHM_OPEN \ USE_MEMORY_PROFILING USE_SHM_OPEN \
USE_STATIC_PCRE USE_STATIC_PCRE2 \ USE_STATIC_PCRE USE_STATIC_PCRE2 \
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT \ USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
USE_QUIC_OPENSSL_COMPAT USE_KTLS
# preset all variables for all supported build options among use_opts # preset all variables for all supported build options among use_opts
$(reset_opts_vars) $(reset_opts_vars)
@ -382,13 +374,13 @@ ifeq ($(TARGET),haiku)
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER) set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
endif endif
# For linux >= 4.17 and glibc # For linux >= 2.6.28 and glibc
ifeq ($(TARGET),linux-glibc) ifeq ($(TARGET),linux-glibc)
set_target_defaults = $(call default_opts, \ set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \ USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS) USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_SYSTEMD)
INSTALL = install -v INSTALL = install -v
endif endif
@ -401,13 +393,13 @@ ifeq ($(TARGET),linux-glibc-legacy)
INSTALL = install -v INSTALL = install -v
endif endif
# For linux >= 4.17 and musl # For linux >= 2.6.28 and musl
ifeq ($(TARGET),linux-musl) ifeq ($(TARGET),linux-musl)
set_target_defaults = $(call default_opts, \ set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \ USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS) USE_GETADDRINFO USE_SHM_OPEN)
INSTALL = install -v INSTALL = install -v
endif endif
@ -424,7 +416,7 @@ endif
ifeq ($(TARGET),freebsd) ifeq ($(TARGET),freebsd)
set_target_defaults = $(call default_opts, \ set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \ USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO USE_PROCCTL) USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO USE_PROCCTL USE_SHM_OPEN)
endif endif
# kFreeBSD glibc # kFreeBSD glibc
@ -598,16 +590,10 @@ endif
ifneq ($(USE_BACKTRACE:0=),) ifneq ($(USE_BACKTRACE:0=),)
BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic) BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
endif
ifneq ($(USE_MEMORY_PROFILING:0=),)
MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
endif endif
ifneq ($(USE_CPU_AFFINITY:0=),) ifneq ($(USE_CPU_AFFINITY:0=),)
OPTIONS_OBJS += src/cpuset.o OPTIONS_OBJS += src/cpuset.o
OPTIONS_OBJS += src/cpu_topo.o
endif endif
# OpenSSL is packaged in various forms and with various dependencies. # OpenSSL is packaged in various forms and with various dependencies.
@ -640,10 +626,7 @@ ifneq ($(USE_OPENSSL:0=),)
SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
endif endif
USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit) USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \ OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_sample.o src/ssl_crtlist.o src/cfgparse-ssl.o src/ssl_utils.o src/jwt.o src/ssl_ocsp.o src/ssl_gencert.o
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
src/ssl_trace.o src/jwe.o
endif endif
ifneq ($(USE_ENGINE:0=),) ifneq ($(USE_ENGINE:0=),)
@ -655,22 +638,17 @@ ifneq ($(USE_ENGINE:0=),)
endif endif
ifneq ($(USE_QUIC:0=),) ifneq ($(USE_QUIC:0=),)
OPTIONS_OBJS += src/quic_conn.o src/mux_quic.o src/h3.o src/xprt_quic.o \
OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \ src/quic_frame.o src/quic_tls.o src/quic_tp.o \
src/quic_conn.o src/quic_frame.o src/quic_sock.o \ src/quic_stats.o src/quic_sock.o src/proto_quic.o \
src/quic_tls.o src/quic_ssl.o src/proto_quic.o \ src/qmux_trace.o src/quic_loss.o src/qpack-enc.o \
src/quic_cli.o src/quic_trace.o src/quic_tp.o \ src/quic_cc_newreno.o src/quic_cc_cubic.o src/qpack-tbl.o \
src/quic_cid.o src/quic_stream.o \ src/qpack-dec.o src/hq_interop.o src/quic_stream.o \
src/quic_retransmit.o src/quic_loss.o \ src/h3_stats.o src/qmux_http.o src/cfgparse-quic.o \
src/hq_interop.o src/quic_cc_cubic.o \ src/cbuf.o src/quic_cc.o src/quic_cc_nocc.o src/quic_ack.o \
src/quic_cc_bbr.o src/quic_retry.o \ src/quic_trace.o src/quic_cli.o src/quic_ssl.o \
src/cfgparse-quic.o src/xprt_quic.o src/quic_token.o \ src/quic_rx.o src/quic_tx.o src/quic_cid.o src/quic_retry.o\
src/quic_ack.o src/qpack-dec.o src/quic_cc_newreno.o \ src/quic_retransmit.o src/quic_fctl.o
src/qmux_http.o src/qmux_trace.o src/quic_rules.o \
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
src/quic_enc.o
endif endif
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),) ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
@ -782,6 +760,10 @@ ifneq ($(USE_WURFL:0=),)
WURFL_LDFLAGS = $(if $(WURFL_LIB),-L$(WURFL_LIB)) -lwurfl WURFL_LDFLAGS = $(if $(WURFL_LIB),-L$(WURFL_LIB)) -lwurfl
endif endif
ifneq ($(USE_SYSTEMD:0=),)
OPTIONS_OBJS += src/systemd.o
endif
ifneq ($(USE_PCRE:0=)$(USE_STATIC_PCRE:0=)$(USE_PCRE_JIT:0=),) ifneq ($(USE_PCRE:0=)$(USE_STATIC_PCRE:0=)$(USE_PCRE_JIT:0=),)
ifneq ($(USE_PCRE2:0=)$(USE_STATIC_PCRE2:0=)$(USE_PCRE2_JIT:0=),) ifneq ($(USE_PCRE2:0=)$(USE_STATIC_PCRE2:0=)$(USE_PCRE2_JIT:0=),)
$(error cannot compile both PCRE and PCRE2 support) $(error cannot compile both PCRE and PCRE2 support)
@ -951,7 +933,7 @@ all:
@echo @echo
@exit 1 @exit 1
else else
all: dev/flags/flags haproxy $(EXTRA) all: haproxy dev/flags/flags $(EXTRA)
endif # obsolete targets endif # obsolete targets
endif # TARGET endif # TARGET
@ -961,48 +943,40 @@ ifneq ($(EXTRA_OBJS),)
OBJS += $(EXTRA_OBJS) OBJS += $(EXTRA_OBJS)
endif endif
OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \ OBJS += src/mux_h2.o src/mux_fcgi.o src/mux_h1.o src/tcpcheck.o \
src/server.o src/stream.o src/tcpcheck.o src/http_ana.o \ src/stream.o src/stats.o src/http_ana.o src/server.o \
src/stick_table.o src/tools.o src/mux_spop.o src/sample.o \ src/stick_table.o src/sample.o src/flt_spoe.o src/tools.o \
src/activity.o src/cfgparse.o src/peers.o src/cli.o \ src/log.o src/cfgparse.o src/peers.o src/backend.o src/resolvers.o \
src/backend.o src/connection.o src/resolvers.o src/proxy.o \ src/cli.o src/connection.o src/proxy.o src/http_htx.o \
src/cache.o src/stconn.o src/http_htx.o src/debug.o \ src/cfgparse-listen.o src/pattern.o src/check.o src/haproxy.o \
src/check.o src/stats-html.o src/haproxy.o src/listener.o \ src/cache.o src/stconn.o src/http_act.o src/http_fetch.o \
src/applet.o src/pattern.o src/cfgparse-listen.o \ src/http_client.o src/listener.o src/dns.o src/vars.o src/debug.o \
src/flt_spoe.o src/cebis_tree.o src/http_ext.o \ src/tcp_rules.o src/sink.o src/h1_htx.o src/task.o src/mjson.o \
src/http_act.o src/http_fetch.o src/cebs_tree.o \ src/h2.o src/filters.o src/server_state.o src/payload.o \
src/cebib_tree.o src/http_client.o src/dns.o \ src/fcgi-app.o src/map.o src/htx.o src/h1.o src/pool.o src/dns_ring.o \
src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \ src/cfgparse-global.o src/trace.o src/tcp_sample.o src/http_ext.o \
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \ src/flt_http_comp.o src/mux_pt.o src/flt_trace.o src/mqtt.o \
src/cfgparse-global.o src/filters.o src/mux_pt.o \ src/acl.o src/sock.o src/mworker.o src/tcp_act.o src/ring.o \
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \ src/session.o src/proto_tcp.o src/fd.o src/channel.o src/activity.o \
src/ceba_tree.o src/session.o src/payload.o src/htx.o \ src/queue.o src/lb_fas.o src/http_rules.o src/extcheck.o \
src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \ src/flt_bwlim.o src/thread.o src/http.o src/lb_chash.o src/applet.o \
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \ src/compression.o src/raw_sock.o src/ncbuf.o src/frontend.o \
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \ src/errors.o src/uri_normalizer.o src/http_conv.o src/lb_fwrr.o \
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \ src/sha1.o src/proto_sockpair.o src/mailers.o src/lb_fwlc.o \
src/ring.o src/flt_bwlim.o src/acl.o src/thread.o src/queue.o \ src/ebmbtree.o src/cfgcond.o src/action.o src/xprt_handshake.o \
src/http_rules.o src/http.o src/channel.o src/proto_tcp.o \ src/protocol.o src/proto_uxst.o src/proto_udp.o src/lb_map.o \
src/mqtt.o src/lb_chash.o src/extcheck.o src/dns_ring.o \ src/fix.o src/ev_select.o src/arg.o src/sock_inet.o src/event_hdl.o \
src/errors.o src/ncbuf.o src/compression.o src/http_conv.o \ src/mworker-prog.o src/hpack-dec.o src/cfgparse-tcp.o src/lb_ss.o \
src/frontend.o src/stats-json.o src/proto_sockpair.o \ src/sock_unix.o src/shctx.o src/proto_uxdg.o src/fcgi.o \
src/raw_sock.o src/action.o src/stats-file.o src/buf.o \ src/eb64tree.o src/clock.o src/chunk.o src/cfgdiag.o src/signal.o \
src/xprt_handshake.o src/proto_uxst.o src/lb_fwrr.o \ src/regex.o src/lru.o src/eb32tree.o src/eb32sctree.o \
src/uri_normalizer.o src/mailers.o src/protocol.o \ src/cfgparse-unix.o src/hpack-tbl.o src/ebsttree.o src/ebimtree.o \
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \ src/base64.o src/auth.o src/uri_auth.o src/time.o src/ebistree.o \
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \ src/dynbuf.o src/wdt.o src/pipe.o src/init.o src/http_acl.o \
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \ src/hpack-huff.o src/hpack-enc.o src/dict.o src/freq_ctr.o \
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \ src/ebtree.o src/hash.o src/dgram.o src/version.o src/proto_rhttp.o \
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \ src/guid.o src/stats-html.o src/stats-json.o src/stats-file.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \ src/stats-proxy.o
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
src/ebistree.o src/base64.o src/auth.o src/time.o \
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o
ifneq ($(TRACE),) ifneq ($(TRACE),)
OBJS += src/calltrace.o OBJS += src/calltrace.o
@ -1037,9 +1011,8 @@ help:
# TARGET variable is not set since we're not building, by definition. # TARGET variable is not set since we're not building, by definition.
IGNORE_OPTS=help install install-man install-doc install-bin \ IGNORE_OPTS=help install install-man install-doc install-bin \
uninstall clean tags cscope tar git-tar version update-version \ uninstall clean tags cscope tar git-tar version update-version \
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \ opts reg-tests reg-tests-help admin/halog/halog dev/flags/flags \
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \ dev/haring/haring dev/poll/poll dev/tcploop/tcploop
dev/term_events/term_events
ifneq ($(TARGET),) ifneq ($(TARGET),)
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),) ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
@ -1076,9 +1049,6 @@ dev/haring/haring: dev/haring/haring.o
dev/hpack/%: dev/hpack/%.o dev/hpack/%: dev/hpack/%.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS) $(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
dev/ncpu/ncpu:
$(cmd_MAKE) -C dev/ncpu ncpu V='$(V)'
dev/poll/poll: dev/poll/poll:
$(cmd_MAKE) -C dev/poll poll CC='$(CC)' OPTIMIZE='$(COPTS)' V='$(V)' $(cmd_MAKE) -C dev/poll poll CC='$(CC)' OPTIMIZE='$(COPTS)' V='$(V)'
@ -1091,16 +1061,13 @@ dev/tcploop/tcploop:
dev/udp/udp-perturb: dev/udp/udp-perturb.o dev/udp/udp-perturb: dev/udp/udp-perturb.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS) $(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
dev/term_events/term_events: dev/term_events/term_events.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
# rebuild it every time # rebuild it every time
.PHONY: src/version.c dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop .PHONY: src/version.c dev/poll/poll dev/tcploop/tcploop
src/calltrace.o: src/calltrace.c $(DEP) src/calltrace.o: src/calltrace.c $(DEP)
$(cmd_CC) $(TRACE_COPTS) -c -o $@ $< $(cmd_CC) $(TRACE_COPTS) -c -o $@ $<
src/version.o: src/version.c $(DEP) src/haproxy.o: src/haproxy.c $(DEP)
$(cmd_CC) $(COPTS) \ $(cmd_CC) $(COPTS) \
-DBUILD_TARGET='"$(strip $(TARGET))"' \ -DBUILD_TARGET='"$(strip $(TARGET))"' \
-DBUILD_CC='"$(strip $(CC))"' \ -DBUILD_CC='"$(strip $(CC))"' \
@ -1123,11 +1090,6 @@ install-doc:
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \ $(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
done done
install-admin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"
install-bin: install-bin:
$(Q)for i in haproxy $(EXTRA); do \ $(Q)for i in haproxy $(EXTRA); do \
if ! [ -e "$$i" ]; then \ if ! [ -e "$$i" ]; then \
@ -1138,7 +1100,7 @@ install-bin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)" $(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)" $(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
install: install-bin install-admin install-man install-doc install: install-bin install-man install-doc
uninstall: uninstall:
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1 $(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
@ -1160,13 +1122,10 @@ clean:
$(Q)rm -f addons/ot/src/*.[oas] $(Q)rm -f addons/ot/src/*.[oas]
$(Q)rm -f addons/wurfl/*.[oas] addons/wurfl/dummy/*.[oas] $(Q)rm -f addons/wurfl/*.[oas] addons/wurfl/dummy/*.[oas]
$(Q)rm -f admin/*/*.[oas] admin/*/*/*.[oas] $(Q)rm -f admin/*/*.[oas] admin/*/*/*.[oas]
$(Q)rm -f dev/*/*.[oas]
$(Q)rm -f dev/flags/flags
distclean: clean
$(Q)rm -f admin/iprange/iprange admin/iprange/ip6range admin/halog/halog $(Q)rm -f admin/iprange/iprange admin/iprange/ip6range admin/halog/halog
$(Q)rm -f admin/dyncookie/dyncookie $(Q)rm -f admin/dyncookie/dyncookie
$(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop $(Q)rm -f dev/*/*.[oas]
$(Q)rm -f dev/flags/flags dev/haring/haring dev/poll/poll dev/tcploop/tcploop
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht $(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
$(Q)rm -f dev/qpack/decode $(Q)rm -f dev/qpack/decode
@ -1286,17 +1245,10 @@ reg-tests-help:
.PHONY: reg-tests reg-tests-help .PHONY: reg-tests reg-tests-help
unit-tests:
$(Q)$(UNIT_TEST_SCRIPT)
.PHONY: unit-tests
# "make range" iteratively builds using "make all" and the exact same build # "make range" iteratively builds using "make all" and the exact same build
# options for all commits within RANGE. RANGE may be either a git range # options for all commits within RANGE. RANGE may be either a git range
# such as ref1..ref2 or a single commit, in which case all commits from # such as ref1..ref2 or a single commit, in which case all commits from
# the master branch to this one will be tested. # the master branch to this one will be tested.
# Will execute TEST_CMD for each commit if defined, and will stop in case of
# failure.
range: range:
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; } $(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
@ -1322,7 +1274,6 @@ range:
echo "[ $$index/$$count ] $$commit #############################"; \ echo "[ $$index/$$count ] $$commit #############################"; \
git checkout -q $$commit || die 1; \ git checkout -q $$commit || die 1; \
$(MAKE) all || die 1; \ $(MAKE) all || die 1; \
[ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
index=$$((index + 1)); \ index=$$((index + 1)); \
done; \ done; \
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \ echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \

22
README Normal file
View File

@ -0,0 +1,22 @@
The HAProxy documentation has been split into a number of different files for
ease of use.
Please refer to the following files depending on what you're looking for :
- INSTALL for instructions on how to build and install HAProxy
- BRANCHES to understand the project's life cycle and what version to use
- LICENSE for the project's license
- CONTRIBUTING for the process to follow to submit contributions
The more detailed documentation is located into the doc/ directory :
- doc/intro.txt for a quick introduction on HAProxy
- doc/configuration.txt for the configuration's reference manual
- doc/lua.txt for the Lua's reference manual
- doc/SPOE.txt for how to use the SPOE engine
- doc/network-namespaces.txt for how to use network namespaces under Linux
- doc/management.txt for the management guide
- doc/regression-testing.txt for how to use the regression testing suite
- doc/peers.txt for the peers protocol reference
- doc/coding-style.txt for how to adopt HAProxy's coding style
- doc/internals for developer-specific documentation (not all up to date)

View File

@ -1,62 +0,0 @@
# HAProxy
[![alpine/musl](https://github.com/haproxy/haproxy/actions/workflows/musl.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
[![AWS-LC](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
[![openssl no-deprecated](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml)
[![Illumos](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
[![NetBSD](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
[![FreeBSD](https://api.cirrus-ci.com/github/haproxy/haproxy.svg?task=FreeBSD)](https://cirrus-ci.com/github/haproxy/haproxy/)
[![VTest](https://github.com/haproxy/haproxy/actions/workflows/vtest.yml/badge.svg)](https://github.com/haproxy/haproxy/actions/workflows/vtest.yml)
![HAProxy logo](doc/HAProxyCommunityEdition_60px.png)
HAProxy is a free, very fast and reliable reverse-proxy offering high availability, load balancing, and proxying for TCP
and HTTP-based applications.
## Installation
The [INSTALL](INSTALL) file describes how to build HAProxy.
A [list of packages](https://github.com/haproxy/wiki/wiki/Packages) is also available on the wiki.
## Getting help
The [discourse](https://discourse.haproxy.org/) and the [mailing-list](https://www.mail-archive.com/haproxy@formilux.org/)
are available for questions or configuration assistance. You can also use the [slack](https://slack.haproxy.org/) or
[IRC](irc://irc.libera.chat/%23haproxy) channel. Please don't use the issue tracker for these.
The [issue tracker](https://github.com/haproxy/haproxy/issues/) is only for bug reports or feature requests.
## Documentation
The HAProxy documentation has been split into a number of different files for
ease of use. It is available in text format as well as HTML. The wiki is also meant to replace the old architecture
guide.
- [HTML documentation](http://docs.haproxy.org/)
- [HTML HAProxy LUA API Documentation](https://www.arpalert.org/haproxy-api.html)
- [Wiki](https://github.com/haproxy/wiki/wiki)
Please refer to the following files depending on what you're looking for:
- [INSTALL](INSTALL) for instructions on how to build and install HAProxy
- [BRANCHES](BRANCHES) to understand the project's life cycle and what version to use
- [LICENSE](LICENSE) for the project's license
- [CONTRIBUTING](CONTRIBUTING) for the process to follow to submit contributions
The more detailed documentation is located into the doc/ directory:
- [ doc/intro.txt ](doc/intro.txt) for a quick introduction on HAProxy
- [ doc/configuration.txt ](doc/configuration.txt) for the configuration's reference manual
- [ doc/lua.txt ](doc/lua.txt) for the Lua's reference manual
- [ doc/SPOE.txt ](doc/SPOE.txt) for how to use the SPOE engine
- [ doc/network-namespaces.txt ](doc/network-namespaces.txt) for how to use network namespaces under Linux
- [ doc/management.txt ](doc/management.txt) for the management guide
- [ doc/regression-testing.txt ](doc/regression-testing.txt) for how to use the regression testing suite
- [ doc/peers.txt ](doc/peers.txt) for the peers protocol reference
- [ doc/coding-style.txt ](doc/coding-style.txt) for how to adopt HAProxy's coding style
- [ doc/internals ](doc/internals) for developer-specific documentation (not all up to date)
## License
HAProxy is licensed under [GPL 2](doc/gpl.txt) or any later version, the headers under [LGPL 2.1](doc/lgpl.txt). See the
[LICENSE](LICENSE) file for a more detailed explanation.

View File

@ -1,2 +1,2 @@
$Format:%ci$ $Format:%ci$
2026/01/07 2024/05/04

View File

@ -1 +1 @@
3.4-dev2 3.0-dev10

View File

@ -5,8 +5,7 @@ CXX := c++
CXXLIB := -lstdc++ CXXLIB := -lstdc++
ifeq ($(DEVICEATLAS_SRC),) ifeq ($(DEVICEATLAS_SRC),)
OPTIONS_CFLAGS += -I$(DEVICEATLAS_INC) OPTIONS_LDFLAGS += -lda
OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
else else
DEVICEATLAS_INC = $(DEVICEATLAS_SRC) DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC) DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)

View File

@ -212,7 +212,7 @@ da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn
* da_getpropid on the atlas, and if generated by the search, the ID will be consistent across * da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
* different calls to search. * different calls to search.
* Properties added by a search that are neither in the compiled atlas, nor in the extra_props list * Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
* Are assigned an ID within the context that is not transferable through different search results * Are assigned an ID within the context that is not transferrable through different search results
* within the same atlas. * within the same atlas.
* @param atlas Atlas instance * @param atlas Atlas instance
* @param extra_props properties * @param extra_props properties

View File

@ -47,12 +47,6 @@ via the OpenTracing API with OpenTracing compatible servers (tracers).
Currently, tracers that support this API include Datadog, Jaeger, LightStep Currently, tracers that support this API include Datadog, Jaeger, LightStep
and Zipkin. and Zipkin.
Note: The OpenTracing filter shouldn't be used for new designs as OpenTracing
itself is no longer maintained nor supported by its authors. A
replacement filter base on OpenTelemetry is currently under development
and is expected to be ready around HAProxy 3.2. As such OpenTracing will
be deprecated in 3.3 and removed in 3.5.
The OT filter was primarily tested with the Jaeger tracer, while configurations The OT filter was primarily tested with the Jaeger tracer, while configurations
for both Datadog and Zipkin tracers were also set in the test directory. for both Datadog and Zipkin tracers were also set in the test directory.

View File

@ -718,7 +718,7 @@ static void flt_ot_check_timeouts(struct stream *s, struct filter *f)
if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1))) if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
FLT_OT_RETURN(); FLT_OT_RETURN();
s->pending_events |= STRM_EVT_MSG; s->pending_events |= TASK_WOKEN_MSG;
flt_ot_return_void(f, &err); flt_ot_return_void(f, &err);

View File

@ -1075,7 +1075,6 @@ static int flt_ot_post_parse_cfg_scope(void)
static int flt_ot_parse_cfg(struct flt_ot_conf *conf, const char *flt_name, char **err) static int flt_ot_parse_cfg(struct flt_ot_conf *conf, const char *flt_name, char **err)
{ {
struct list backup_sections; struct list backup_sections;
struct cfgfile cfg_file = {0};
int retval = ERR_ABORT | ERR_ALERT; int retval = ERR_ABORT | ERR_ALERT;
FLT_OT_FUNC("%p, \"%s\", %p:%p", conf, flt_name, FLT_OT_DPTR_ARGS(err)); FLT_OT_FUNC("%p, \"%s\", %p:%p", conf, flt_name, FLT_OT_DPTR_ARGS(err));
@ -1095,16 +1094,8 @@ static int flt_ot_parse_cfg(struct flt_ot_conf *conf, const char *flt_name, char
/* Do nothing. */; /* Do nothing. */;
else if (access(conf->cfg_file, R_OK) == -1) else if (access(conf->cfg_file, R_OK) == -1)
FLT_OT_PARSE_ERR(err, "'%s' : %s", conf->cfg_file, strerror(errno)); FLT_OT_PARSE_ERR(err, "'%s' : %s", conf->cfg_file, strerror(errno));
else { else
cfg_file.filename = conf->cfg_file; retval = readcfgfile(conf->cfg_file);
cfg_file.size = load_cfg_in_mem(cfg_file.filename, &cfg_file.content);
if (cfg_file.size < 0) {
ha_free(&cfg_file.content);
FLT_OT_RETURN_INT(retval);
}
retval = parse_cfg(&cfg_file);
ha_free(&cfg_file.content);
}
/* Unregister OT sections and restore previous sections. */ /* Unregister OT sections and restore previous sections. */
cfg_unregister_sections(); cfg_unregister_sections();

View File

@ -39,21 +39,14 @@
*/ */
static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope) static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope)
{ {
int i; const struct var *var;
if (vars == NULL) if (vars == NULL)
return; return;
vars_rdlock(vars); vars_rdlock(vars);
for (i = 0; i < VAR_NAME_ROOTS; i++) { list_for_each_entry(var, &(vars->head), l)
struct ceb_node *node = cebu64_first(&(vars->name_root[i]));
for ( ; node != NULL; node = cebu64_next(&(vars->name_root[i]), node)) {
struct var *var = container_of(node, struct var, node);
FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str))); FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
}
}
vars_rdunlock(vars); vars_rdunlock(vars);
} }

View File

@ -91,18 +91,6 @@ name must be preceded by a minus character ('-'). Here are examples:
# Only dump frontends, backends and servers status # Only dump frontends, backends and servers status
/metrics?metrics=haproxy_frontend_status,haproxy_backend_status,haproxy_server_status /metrics?metrics=haproxy_frontend_status,haproxy_backend_status,haproxy_server_status
* Add section description as label for all metrics
It is possible to set a description in global and proxy sections, via the
"description" directive. The global description is exposed if it is define via
the "haproxy_process_description" metric. But the descriptions provided in proxy
sections are not dumped. However, it is possible to add it as a label for all
metrics of the corresponding section, including the global one. To do so,
"desc-labels" parameter must be set:
/metrics?desc-labels
/ metrics?scope=frontend&desc-labels
* Dump extra counters * Dump extra counters
@ -205,8 +193,6 @@ listed below. Metrics from extra counters are not listed.
| haproxy_process_current_tasks | | haproxy_process_current_tasks |
| haproxy_process_current_run_queue | | haproxy_process_current_run_queue |
| haproxy_process_idle_time_percent | | haproxy_process_idle_time_percent |
| haproxy_process_node |
| haproxy_process_description |
| haproxy_process_stopping | | haproxy_process_stopping |
| haproxy_process_jobs | | haproxy_process_jobs |
| haproxy_process_unstoppable_jobs | | haproxy_process_unstoppable_jobs |
@ -389,9 +375,6 @@ listed below. Metrics from extra counters are not listed.
| haproxy_server_max_connect_time_seconds | | haproxy_server_max_connect_time_seconds |
| haproxy_server_max_response_time_seconds | | haproxy_server_max_response_time_seconds |
| haproxy_server_max_total_time_seconds | | haproxy_server_max_total_time_seconds |
| haproxy_server_agent_status |
| haproxy_server_agent_code |
| haproxy_server_agent_duration_seconds |
| haproxy_server_internal_errors_total | | haproxy_server_internal_errors_total |
| haproxy_server_unsafe_idle_connections_current | | haproxy_server_unsafe_idle_connections_current |
| haproxy_server_safe_idle_connections_current | | haproxy_server_safe_idle_connections_current |

View File

@ -32,11 +32,11 @@
/* Prometheus exporter flags (ctx->flags) */ /* Prometheus exporter flags (ctx->flags) */
#define PROMEX_FL_METRIC_HDR 0x00000001 #define PROMEX_FL_METRIC_HDR 0x00000001
#define PROMEX_FL_BODYLESS_RESP 0x00000002 #define PROMEX_FL_INFO_METRIC 0x00000002
/* unused: 0x00000004 */ #define PROMEX_FL_FRONT_METRIC 0x00000004
/* unused: 0x00000008 */ #define PROMEX_FL_BACK_METRIC 0x00000008
/* unused: 0x00000010 */ #define PROMEX_FL_SRV_METRIC 0x00000010
/* unused: 0x00000020 */ #define PROMEX_FL_LI_METRIC 0x00000020
#define PROMEX_FL_MODULE_METRIC 0x00000040 #define PROMEX_FL_MODULE_METRIC 0x00000040
#define PROMEX_FL_SCOPE_GLOBAL 0x00000080 #define PROMEX_FL_SCOPE_GLOBAL 0x00000080
#define PROMEX_FL_SCOPE_FRONT 0x00000100 #define PROMEX_FL_SCOPE_FRONT 0x00000100
@ -47,7 +47,6 @@
#define PROMEX_FL_NO_MAINT_SRV 0x00002000 #define PROMEX_FL_NO_MAINT_SRV 0x00002000
#define PROMEX_FL_EXTRA_COUNTERS 0x00004000 #define PROMEX_FL_EXTRA_COUNTERS 0x00004000
#define PROMEX_FL_INC_METRIC_BY_DEFAULT 0x00008000 #define PROMEX_FL_INC_METRIC_BY_DEFAULT 0x00008000
#define PROMEX_FL_DESC_LABELS 0x00010000
#define PROMEX_FL_SCOPE_ALL (PROMEX_FL_SCOPE_GLOBAL | PROMEX_FL_SCOPE_FRONT | \ #define PROMEX_FL_SCOPE_ALL (PROMEX_FL_SCOPE_GLOBAL | PROMEX_FL_SCOPE_FRONT | \
PROMEX_FL_SCOPE_LI | PROMEX_FL_SCOPE_BACK | \ PROMEX_FL_SCOPE_LI | PROMEX_FL_SCOPE_BACK | \

File diff suppressed because it is too large Load Diff

674
admin/acme.sh/LICENSE Normal file
View File

@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

13
admin/acme.sh/README Normal file
View File

@ -0,0 +1,13 @@
This directory contains a fork of the acme.sh deploy script for haproxy which
allows acme.sh to run as non-root and does not require reloading haproxy.
The content of this directory is licensed under GPLv3 as explained in the
LICENSE file.
This was originally written for this pull request
https://github.com/acmesh-official/acme.sh/pull/4581.
The documentation is available on the haproxy wiki:
https://github.com/haproxy/wiki/wiki/Letsencrypt-integration-with-HAProxy-and-acme.sh
The haproxy.sh script must replace the one provided by acme.sh.

403
admin/acme.sh/haproxy.sh Normal file
View File

@ -0,0 +1,403 @@
#!/usr/bin/env sh
# Script for acme.sh to deploy certificates to haproxy
#
# The following variables can be exported:
#
# export DEPLOY_HAPROXY_PEM_NAME="${domain}.pem"
#
# Defines the name of the PEM file.
# Defaults to "<domain>.pem"
#
# export DEPLOY_HAPROXY_PEM_PATH="/etc/haproxy"
#
# Defines location of PEM file for HAProxy.
# Defaults to /etc/haproxy
#
# export DEPLOY_HAPROXY_RELOAD="systemctl reload haproxy"
#
# OPTIONAL: Reload command used post deploy
# This defaults to be a no-op (ie "true").
# It is strongly recommended to set this to something that makes sense
# for your distro.
#
# export DEPLOY_HAPROXY_ISSUER="no"
#
# OPTIONAL: Places CA file as "${DEPLOY_HAPROXY_PEM}.issuer"
# Note: Required for OCSP stapling to work
#
# export DEPLOY_HAPROXY_BUNDLE="no"
#
# OPTIONAL: Deploy this certificate as part of a multi-cert bundle
# This adds a suffix to the certificate based on the certificate type
# eg RSA certificates will have .rsa as a suffix to the file name
# HAProxy will load all certificates and provide one or the other
# depending on client capabilities
# Note: This functionality requires HAProxy was compiled against
# a version of OpenSSL that supports this.
#
# export DEPLOY_HAPROXY_HOT_UPDATE="yes"
# export DEPLOY_HAPROXY_STATS_SOCKET="UNIX:/run/haproxy/admin.sock"
#
# OPTIONAL: Deploy the certificate over the HAProxy stats socket without
# needing to reload HAProxy. Default is "no".
#
# Requires the socat binary. The DEPLOY_HAPROXY_STATS_SOCKET variable uses the
# socat address format.
#
# export DEPLOY_HAPROXY_MASTER_CLI="UNIX:/run/haproxy-master.sock"
#
# OPTIONAL: To use the master CLI with DEPLOY_HAPROXY_HOT_UPDATE="yes" instead
# of a stats socket, use this variable.
######## Public functions #####################
#domain keyfile certfile cafile fullchain
haproxy_deploy() {
  # Deploy an acme.sh-issued certificate to HAProxy.
  #
  # Builds a combined PEM (cert + CA + key), optionally writes the
  # ".issuer" and ".ocsp" companion files, then either hot-updates the
  # certificate over the HAProxy stats socket / master CLI (socat) or
  # runs the configured reload command.
  #
  # Arguments (supplied by acme.sh's deploy hook API):
  #   $1 - domain the certificate was issued for
  #   $2 - private key file
  #   $3 - certificate file
  #   $4 - CA / issuer certificate file
  #   $5 - full chain file (not used directly; the PEM is built from $2/$3/$4)
  # Returns 0 on success, non-zero on any fatal error.
  _cdomain="$1"
  _ckey="$2"
  _ccert="$3"
  _cca="$4"
  _cfullchain="$5"
  # Command prefix; stays empty for a stats socket, becomes "@1 " when
  # talking to the master CLI (routes commands to the first worker).
  _cmdpfx=""

  # Some defaults
  DEPLOY_HAPROXY_PEM_PATH_DEFAULT="/etc/haproxy"
  DEPLOY_HAPROXY_PEM_NAME_DEFAULT="${_cdomain}.pem"
  DEPLOY_HAPROXY_BUNDLE_DEFAULT="no"
  DEPLOY_HAPROXY_ISSUER_DEFAULT="no"
  DEPLOY_HAPROXY_RELOAD_DEFAULT="true"
  DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT="no"
  DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT="UNIX:/run/haproxy/admin.sock"

  _debug _cdomain "${_cdomain}"
  _debug _ckey "${_ckey}"
  _debug _ccert "${_ccert}"
  _debug _cca "${_cca}"
  _debug _cfullchain "${_cfullchain}"

  # Each DEPLOY_HAPROXY_* setting below follows the same pattern: prefer the
  # exported/env value (and persist it via _savedomainconf), otherwise keep a
  # previously saved Le_Deploy_* value, otherwise fall back to the default.

  # PEM_PATH is optional. If not provided then assume "${DEPLOY_HAPROXY_PEM_PATH_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_PEM_PATH
  _debug2 DEPLOY_HAPROXY_PEM_PATH "${DEPLOY_HAPROXY_PEM_PATH}"
  if [ -n "${DEPLOY_HAPROXY_PEM_PATH}" ]; then
    Le_Deploy_haproxy_pem_path="${DEPLOY_HAPROXY_PEM_PATH}"
    _savedomainconf Le_Deploy_haproxy_pem_path "${Le_Deploy_haproxy_pem_path}"
  elif [ -z "${Le_Deploy_haproxy_pem_path}" ]; then
    Le_Deploy_haproxy_pem_path="${DEPLOY_HAPROXY_PEM_PATH_DEFAULT}"
  fi

  # Ensure PEM_PATH exists
  if [ -d "${Le_Deploy_haproxy_pem_path}" ]; then
    _debug "PEM_PATH ${Le_Deploy_haproxy_pem_path} exists"
  else
    _err "PEM_PATH ${Le_Deploy_haproxy_pem_path} does not exist"
    return 1
  fi

  # PEM_NAME is optional. If not provided then assume "${DEPLOY_HAPROXY_PEM_NAME_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_PEM_NAME
  _debug2 DEPLOY_HAPROXY_PEM_NAME "${DEPLOY_HAPROXY_PEM_NAME}"
  if [ -n "${DEPLOY_HAPROXY_PEM_NAME}" ]; then
    Le_Deploy_haproxy_pem_name="${DEPLOY_HAPROXY_PEM_NAME}"
    _savedomainconf Le_Deploy_haproxy_pem_name "${Le_Deploy_haproxy_pem_name}"
  elif [ -z "${Le_Deploy_haproxy_pem_name}" ]; then
    Le_Deploy_haproxy_pem_name="${DEPLOY_HAPROXY_PEM_NAME_DEFAULT}"
    # We better not have '*' as the first character (wildcard domain names)
    if [ "${Le_Deploy_haproxy_pem_name%%"${Le_Deploy_haproxy_pem_name#?}"}" = '*' ]; then
      # removes the first character and adds a _ instead
      Le_Deploy_haproxy_pem_name="_${Le_Deploy_haproxy_pem_name#?}"
    fi
  fi

  # BUNDLE is optional. If not provided then assume "${DEPLOY_HAPROXY_BUNDLE_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_BUNDLE
  _debug2 DEPLOY_HAPROXY_BUNDLE "${DEPLOY_HAPROXY_BUNDLE}"
  if [ -n "${DEPLOY_HAPROXY_BUNDLE}" ]; then
    Le_Deploy_haproxy_bundle="${DEPLOY_HAPROXY_BUNDLE}"
    _savedomainconf Le_Deploy_haproxy_bundle "${Le_Deploy_haproxy_bundle}"
  elif [ -z "${Le_Deploy_haproxy_bundle}" ]; then
    Le_Deploy_haproxy_bundle="${DEPLOY_HAPROXY_BUNDLE_DEFAULT}"
  fi

  # ISSUER is optional. If not provided then assume "${DEPLOY_HAPROXY_ISSUER_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_ISSUER
  _debug2 DEPLOY_HAPROXY_ISSUER "${DEPLOY_HAPROXY_ISSUER}"
  if [ -n "${DEPLOY_HAPROXY_ISSUER}" ]; then
    Le_Deploy_haproxy_issuer="${DEPLOY_HAPROXY_ISSUER}"
    _savedomainconf Le_Deploy_haproxy_issuer "${Le_Deploy_haproxy_issuer}"
  elif [ -z "${Le_Deploy_haproxy_issuer}" ]; then
    Le_Deploy_haproxy_issuer="${DEPLOY_HAPROXY_ISSUER_DEFAULT}"
  fi

  # RELOAD is optional. If not provided then assume "${DEPLOY_HAPROXY_RELOAD_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_RELOAD
  _debug2 DEPLOY_HAPROXY_RELOAD "${DEPLOY_HAPROXY_RELOAD}"
  if [ -n "${DEPLOY_HAPROXY_RELOAD}" ]; then
    Le_Deploy_haproxy_reload="${DEPLOY_HAPROXY_RELOAD}"
    _savedomainconf Le_Deploy_haproxy_reload "${Le_Deploy_haproxy_reload}"
  elif [ -z "${Le_Deploy_haproxy_reload}" ]; then
    Le_Deploy_haproxy_reload="${DEPLOY_HAPROXY_RELOAD_DEFAULT}"
  fi

  # HOT_UPDATE is optional. If not provided then assume "${DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_HOT_UPDATE
  _debug2 DEPLOY_HAPROXY_HOT_UPDATE "${DEPLOY_HAPROXY_HOT_UPDATE}"
  if [ -n "${DEPLOY_HAPROXY_HOT_UPDATE}" ]; then
    Le_Deploy_haproxy_hot_update="${DEPLOY_HAPROXY_HOT_UPDATE}"
    _savedomainconf Le_Deploy_haproxy_hot_update "${Le_Deploy_haproxy_hot_update}"
  elif [ -z "${Le_Deploy_haproxy_hot_update}" ]; then
    Le_Deploy_haproxy_hot_update="${DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT}"
  fi

  # STATS_SOCKET is optional. If not provided then assume "${DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_STATS_SOCKET
  _debug2 DEPLOY_HAPROXY_STATS_SOCKET "${DEPLOY_HAPROXY_STATS_SOCKET}"
  if [ -n "${DEPLOY_HAPROXY_STATS_SOCKET}" ]; then
    Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_STATS_SOCKET}"
    _savedomainconf Le_Deploy_haproxy_stats_socket "${Le_Deploy_haproxy_stats_socket}"
  elif [ -z "${Le_Deploy_haproxy_stats_socket}" ]; then
    Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT}"
  fi

  # MASTER_CLI is optional. No defaults are used. When the master CLI is used,
  # all commands are sent with a prefix. Note: this overrides STATS_SOCKET.
  _getdeployconf DEPLOY_HAPROXY_MASTER_CLI
  _debug2 DEPLOY_HAPROXY_MASTER_CLI "${DEPLOY_HAPROXY_MASTER_CLI}"
  if [ -n "${DEPLOY_HAPROXY_MASTER_CLI}" ]; then
    Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_MASTER_CLI}"
    _savedomainconf Le_Deploy_haproxy_stats_socket "${Le_Deploy_haproxy_stats_socket}"
    _cmdpfx="@1 " # command prefix used for master CLI only.
  fi

  # Set the suffix depending if we are creating a bundle or not
  if [ "${Le_Deploy_haproxy_bundle}" = "yes" ]; then
    _info "Bundle creation requested"
    # Initialise $Le_Keylength if its not already set
    if [ -z "${Le_Keylength}" ]; then
      Le_Keylength=""
    fi
    if _isEccKey "${Le_Keylength}"; then
      _info "ECC key type detected"
      _suffix=".ecdsa"
    else
      _info "RSA key type detected"
      _suffix=".rsa"
    fi
  else
    _suffix=""
  fi
  _debug _suffix "${_suffix}"

  # Set variables for later
  _pem="${Le_Deploy_haproxy_pem_path}/${Le_Deploy_haproxy_pem_name}${_suffix}"
  _issuer="${_pem}.issuer"
  _ocsp="${_pem}.ocsp"
  _reload="${Le_Deploy_haproxy_reload}"
  _statssock="${Le_Deploy_haproxy_stats_socket}"

  _info "Deploying PEM file"
  # Create a temporary PEM file; "grep ." strips empty lines from the
  # concatenated cert + CA + key.
  _temppem="$(_mktemp)"
  _debug _temppem "${_temppem}"
  cat "${_ccert}" "${_cca}" "${_ckey}" | grep . >"${_temppem}"
  _ret="$?"

  # Check that we could create the temporary file
  if [ "${_ret}" != "0" ]; then
    _err "Error code ${_ret} returned during PEM file creation"
    [ -f "${_temppem}" ] && rm -f "${_temppem}"
    return ${_ret}
  fi

  # Move PEM file into place
  _info "Moving new certificate into place"
  _debug _pem "${_pem}"
  cat "${_temppem}" >"${_pem}"
  _ret=$?

  # Clean up temp file
  [ -f "${_temppem}" ] && rm -f "${_temppem}"

  # Deal with any failure of moving PEM file into place
  if [ "${_ret}" != "0" ]; then
    _err "Error code ${_ret} returned while moving new certificate into place"
    return ${_ret}
  fi

  # Update .issuer file if requested (HAProxy needs it for OCSP stapling)
  if [ "${Le_Deploy_haproxy_issuer}" = "yes" ]; then
    _info "Updating .issuer file"
    _debug _issuer "${_issuer}"
    cat "${_cca}" >"${_issuer}"
    _ret="$?"
    if [ "${_ret}" != "0" ]; then
      _err "Error code ${_ret} returned while copying issuer/CA certificate into place"
      return ${_ret}
    fi
  else
    [ -f "${_issuer}" ] && _err "Issuer file update not requested but .issuer file exists"
  fi

  # Update .ocsp file if certificate was requested with --ocsp/--ocsp-must-staple option
  if [ -z "${Le_OCSP_Staple}" ]; then
    Le_OCSP_Staple="0"
  fi
  if [ "${Le_OCSP_Staple}" = "1" ]; then
    _info "Updating OCSP stapling info"
    _debug _ocsp "${_ocsp}"
    _info "Extracting OCSP URL"
    _ocsp_url=$(${ACME_OPENSSL_BIN:-openssl} x509 -noout -ocsp_uri -in "${_pem}")
    _debug _ocsp_url "${_ocsp_url}"

    # Only process OCSP if URL was present
    if [ "${_ocsp_url}" != "" ]; then
      # Extract the hostname from the OCSP URL
      _info "Extracting OCSP URL"
      _ocsp_host=$(echo "${_ocsp_url}" | cut -d/ -f3)
      _debug _ocsp_host "${_ocsp_host}"

      # Only process the certificate if we have a .issuer file
      if [ -r "${_issuer}" ]; then
        # Check if issuer cert is also a root CA cert by comparing the
        # subject DN with the issuer DN (self-signed => they match)
        _subjectdn=$(${ACME_OPENSSL_BIN:-openssl} x509 -in "${_issuer}" -subject -noout | cut -d'/' -f2,3,4,5,6,7,8,9,10)
        _debug _subjectdn "${_subjectdn}"
        _issuerdn=$(${ACME_OPENSSL_BIN:-openssl} x509 -in "${_issuer}" -issuer -noout | cut -d'/' -f2,3,4,5,6,7,8,9,10)
        _debug _issuerdn "${_issuerdn}"
        _info "Requesting OCSP response"
        # If the issuer is a CA cert then our command line has "-CAfile" added
        if [ "${_subjectdn}" = "${_issuerdn}" ]; then
          _cafile_argument="-CAfile \"${_issuer}\""
        else
          _cafile_argument=""
        fi
        _debug _cafile_argument "${_cafile_argument}"
        # if OpenSSL/LibreSSL is v1.1 or above, the format for the -header option has changed
        _openssl_version=$(${ACME_OPENSSL_BIN:-openssl} version | cut -d' ' -f2)
        _debug _openssl_version "${_openssl_version}"
        _openssl_major=$(echo "${_openssl_version}" | cut -d '.' -f1)
        _openssl_minor=$(echo "${_openssl_version}" | cut -d '.' -f2)
        if [ "${_openssl_major}" -eq "1" ] && [ "${_openssl_minor}" -ge "1" ] || [ "${_openssl_major}" -ge "2" ]; then
          _header_sep="="
        else
          _header_sep=" "
        fi
        # Request the OCSP response from the issuer and store it
        _openssl_ocsp_cmd="${ACME_OPENSSL_BIN:-openssl} ocsp \
-issuer \"${_issuer}\" \
-cert \"${_pem}\" \
-url \"${_ocsp_url}\" \
-header Host${_header_sep}\"${_ocsp_host}\" \
-respout \"${_ocsp}\" \
-verify_other \"${_issuer}\" \
${_cafile_argument} \
| grep -q \"${_pem}: good\""
        _debug _openssl_ocsp_cmd "${_openssl_ocsp_cmd}"
        eval "${_openssl_ocsp_cmd}"
        _ret=$?
      else
        # Non fatal: No issuer file was present so no OCSP stapling file created
        _err "OCSP stapling in use but no .issuer file was present"
      fi
    else
      # Non fatal: No OCSP url was found in the certificate
      _err "OCSP update requested but no OCSP URL was found in certificate"
    fi

    # Non fatal: Check return code of openssl command
    # NOTE(review): when the OCSP URL or .issuer file is missing, _ret still
    # holds the status of an earlier command, not of the OCSP request --
    # the message below may then be skipped or misleading. TODO confirm.
    if [ "${_ret}" != "0" ]; then
      _err "Updating OCSP stapling failed with return code ${_ret}"
    fi
  else
    # An OCSP file was already present but certificate did not have OCSP extension
    if [ -f "${_ocsp}" ]; then
      _err "OCSP was not requested but .ocsp file exists."
      # Could remove the file at this step, although HAProxy just ignores it in this case
      # rm -f "${_ocsp}" || _err "Problem removing stale .ocsp file"
    fi
  fi

  if [ "${Le_Deploy_haproxy_hot_update}" = "yes" ]; then
    # set the socket name for messages
    if [ -n "${_cmdpfx}" ]; then
      _socketname="master CLI"
    else
      _socketname="stats socket"
    fi

    # Update certificate over HAProxy stats socket or master CLI.
    if _exists socat; then
      # look for the certificate on the stats socket, to choose between updating or creating one
      _socat_cert_cmd="echo '${_cmdpfx}show ssl cert' | socat '${_statssock}' - | grep -q '^${_pem}$'"
      _debug _socat_cert_cmd "${_socat_cert_cmd}"
      eval "${_socat_cert_cmd}"
      _ret=$?
      if [ "${_ret}" != "0" ]; then
        _newcert="1"
        _info "Creating new certificate '${_pem}' over HAProxy ${_socketname}."
        # certificate wasn't found, it's a new one. We should check if the crt-list exists and creates/inserts the certificate.
        _socat_crtlist_show_cmd="echo '${_cmdpfx}show ssl crt-list' | socat '${_statssock}' - | grep -q '^${Le_Deploy_haproxy_pem_path}$'"
        _debug _socat_crtlist_show_cmd "${_socat_crtlist_show_cmd}"
        eval "${_socat_crtlist_show_cmd}"
        _ret=$?
        if [ "${_ret}" != "0" ]; then
          _err "Couldn't find '${Le_Deploy_haproxy_pem_path}' in haproxy 'show ssl crt-list'"
          return "${_ret}"
        fi
        # create a new certificate
        _socat_new_cmd="echo '${_cmdpfx}new ssl cert ${_pem}' | socat '${_statssock}' - | grep -q 'New empty'"
        _debug _socat_new_cmd "${_socat_new_cmd}"
        eval "${_socat_new_cmd}"
        _ret=$?
        if [ "${_ret}" != "0" ]; then
          _err "Couldn't create '${_pem}' in haproxy"
          return "${_ret}"
        fi
      else
        _info "Update existing certificate '${_pem}' over HAProxy ${_socketname}."
      fi
      # Upload the new PEM payload; commit below makes it live.
      _socat_cert_set_cmd="echo -e '${_cmdpfx}set ssl cert ${_pem} <<\n$(cat "${_pem}")\n' | socat '${_statssock}' - | grep -q 'Transaction created'"
      _debug _socat_cert_set_cmd "${_socat_cert_set_cmd}"
      eval "${_socat_cert_set_cmd}"
      _ret=$?
      if [ "${_ret}" != "0" ]; then
        _err "Can't update '${_pem}' in haproxy"
        return "${_ret}"
      fi
      _socat_cert_commit_cmd="echo '${_cmdpfx}commit ssl cert ${_pem}' | socat '${_statssock}' - | grep -q '^Success!$'"
      _debug _socat_cert_commit_cmd "${_socat_cert_commit_cmd}"
      eval "${_socat_cert_commit_cmd}"
      _ret=$?
      if [ "${_ret}" != "0" ]; then
        _err "Can't commit '${_pem}' in haproxy"
        return ${_ret}
      fi
      if [ "${_newcert}" = "1" ]; then
        # if this is a new certificate, it needs to be inserted into the crt-list
        _socat_cert_add_cmd="echo '${_cmdpfx}add ssl crt-list ${Le_Deploy_haproxy_pem_path} ${_pem}' | socat '${_statssock}' - | grep -q 'Success!'"
        _debug _socat_cert_add_cmd "${_socat_cert_add_cmd}"
        eval "${_socat_cert_add_cmd}"
        _ret=$?
        if [ "${_ret}" != "0" ]; then
          _err "Can't update '${_pem}' in haproxy"
          return "${_ret}"
        fi
      fi
    else
      _err "'socat' is not available, couldn't update over ${_socketname}"
    fi
  else
    # Reload HAProxy
    _debug _reload "${_reload}"
    eval "${_reload}"
    _ret=$?
    if [ "${_ret}" != "0" ]; then
      _err "Error code ${_ret} during reload"
      return ${_ret}
    else
      _info "Reload successful"
    fi
  fi

  return 0
}

View File

@ -1,235 +0,0 @@
#!/bin/bash
#
# Dump certificates from the HAProxy stats or master socket to the filesystem
# Experimental script
#
# Abort on the first failing command: a partial dump must never overwrite files.
set -e
# Base directory used to resolve crt/key paths given without crt-base/key-base.
export BASEPATH=${BASEPATH:-/etc/haproxy}/
# HAProxy master socket by default; a plain stats socket can be set with -s.
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
export DRY_RUN=0
export DEBUG=
export VERBOSE=
# "@1 " routes CLI commands through the master socket to worker #1;
# cleared by main() when a plain stats socket is selected (-s).
export M="@1 "
# Work directory for temporary files; created in main() when unset.
export TMP
# Echo the arguments only when verbose mode is enabled; always return 0 so
# that the "set -e" at the top of the script is never triggered by a quiet run.
vecho() {
	if [ -n "$VERBOSE" ]; then
		echo "$@"
	fi
	return 0
}
# Ask the CLI for "show ssl cert <name>" and extract the on-disk certificate
# and key paths. On success exports NAME, CRT_FILENAME and KEY_FILENAME and
# returns 0; returns 1 when the filename details were not found.
read_certificate() {
name=$1
crt_filename=
key_filename=
# split "Key: value" lines on ':' while parsing the CLI answer
OFS=$IFS
IFS=":"
while read -r key value; do
case "$key" in
"Crt filename")
crt_filename="${value# }"
# default the key path to the crt path (single-PEM case); may be
# overridden by an explicit "Key filename" line below
key_filename="${value# }"
;;
"Key filename")
key_filename="${value# }"
;;
esac
done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
IFS=$OFS
if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
return 1
fi
# handle fields without a crt-base/key-base
[ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
[ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"
vecho "name:$name"
vecho "crt:$crt_filename"
vecho "key:$key_filename"
export NAME="$name"
export CRT_FILENAME="$crt_filename"
export KEY_FILENAME="$key_filename"
return 0
}
# Return 0 when certificate file $1 exists and carries the same SHA-256
# fingerprint as certificate file $2, 1 otherwise. Fingerprints are compared
# instead of raw bytes so PEM formatting differences don't force a rewrite.
cmp_certkey() {
prev=$1
new=$2
if [ ! -f "$prev" ]; then
return 1;
fi
if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
return 1
fi
return 0
}
# Dump certificate <name> ($1) from the CLI socket and install it over the
# previous crt file ($2) and key file ($3). The replaced files are kept as
# "<file>.old.<unix-timestamp>". Honors DRY_RUN. Returns non-zero on error.
dump_certificate() {
name=$1
prev_crt=$2
prev_key=$3
r="tmp.${RANDOM}"
d="old.$(date +%s)"
new_crt="$TMP/$(basename "$prev_crt").${r}"
new_key="$TMP/$(basename "$prev_key").${r}"
if ! touch "${new_crt}" || ! touch "${new_key}"; then
echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
return 1
fi
# extract the private key, then the certificate chain, from the PEM dump
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
# use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"
# sanity check: the dumped private key must match the dumped certificate
if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
return 1
fi
if cmp_certkey "${prev_crt}" "${new_crt}"; then
# BUGFIX: report this call's target file; the previous code printed
# ${crt_filename}, a variable leaked from read_certificate(), and only
# worked by accident of bash's global scoping.
echo "[NOTICE] ($$) : ${prev_crt} is already up to date" >&2
return 0
fi
# dry run will just return before trying to move the files
if [ "${DRY_RUN}" != "0" ]; then
return 0
fi
# move the current certificates to ".old.timestamp"
if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
mv "${prev_crt}" "${prev_crt}.${d}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
fi
# move the new certificates to old place
mv "${new_crt}" "${prev_crt}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"
return 0
}
# Iterate over every certificate listed by "show ssl cert" (skipping comment
# and empty lines) and dump each one in place; unresolvable entries are
# reported on stderr and skipped.
dump_all_certificates() {
echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
export NAME
export CRT_FILENAME
export KEY_FILENAME
if read_certificate "$line"; then
dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
else
# NOTE(review): "$name" is the global set inside read_certificate();
# "$line" would be the more robust spelling here.
echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
fi
done
}
# Print the command-line help on stdout (defaults expanded from the current
# environment).
usage() {
echo "Usage:"
echo " $0 [options]* [cert]*"
echo ""
echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
echo " Require socat and openssl"
echo " EXPERIMENTAL script, backup your files!"
echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
echo ""
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
echo " -s, --socket <path> Use the stats socket at <path>"
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
echo " -n, --dry-run Read certificates on the socket but don't dump them"
echo " -d, --debug Debug mode, set -x"
echo " -v, --verbose Verbose mode"
echo " -h, --help This help"
echo " -- End of options"
echo ""
echo "Examples:"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
}
# Parse the command line, then dump either every certificate known to the
# CLI (no positional argument) or only the certificates named after "--".
main() {
while [ -n "$1" ]; do
case "$1" in
-S|--master-socket)
# master socket: commands must be prefixed with "@1 " to reach worker 1
SOCKET="$2"
M="@1 "
shift 2
;;
-s|--socket)
# plain stats socket: no routing prefix needed
SOCKET="$2"
M=
shift 2
;;
-p|--path)
BASEPATH="$2/"
shift 2
;;
-n|--dry-run)
DRY_RUN=1
shift
;;
-d|--debug)
DEBUG=1
shift
;;
-v|--verbose)
VERBOSE=1
shift
;;
-h|--help)
usage "$@"
exit 0
;;
--)
shift
break
;;
-*)
echo "[ALERT] ($$) : Unknown option '$1'" >&2
usage "$@"
exit 1
;;
*)
break
;;
esac
done
if [ -n "$DEBUG" ]; then
set -x
fi
# temp dir for staging the dumped files; removed by the EXIT trap
TMP=${TMP:-$(mktemp -d)}
if [ -z "$1" ]; then
dump_all_certificates
else
# compute the certificates names at the end of the command
while [ -n "$1" ]; do
if ! read_certificate "$1"; then
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
exit 1
fi
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
shift
done
fi
}
# Always clean the staging directory on exit ("" expands harmlessly when TMP
# is still unset at trap time).
trap 'rm -rf -- "$TMP"' EXIT
main "$@"

View File

@ -1,113 +0,0 @@
#!/bin/bash
# Abort on the first failing command.
set -e
# Verbosity: 0=silent, 1=alerts only (default), 2=haproxy output on failure,
# 3=haproxy output always.
export VERBOSE=1
# socat timeout in seconds; must cover the whole reload duration.
export TIMEOUT=90
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
# Reload status: empty until the answer is parsed, then 0 (ok) or 1 (failed).
export RET=
# Write "[ALERT] <message>" to stderr unless silent mode (-s) is active.
alert() {
	if [ "$VERBOSE" -ge 1 ]; then
		printf '[ALERT] %s\n' "$*" >&2
	fi
}
# Send "reload" to the master socket and parse the answer: a "Success=0/1"
# status line followed by the startup output. Sets RET accordingly and
# returns it; returns 1 when no status line arrived before the timeout.
reload() {
while read -r line; do
if [ "$line" = "Success=0" ]; then
RET=1
elif [ "$line" = "Success=1" ]; then
RET=0
elif [ "$line" = "Another reload is still in progress." ]; then
alert "$line"
elif [ "$line" = "--" ]; then
# separator between the status and the startup output
continue;
else
# startup output lines: shown on failure at -v, always at -vv
# (assumes the Success= line precedes the output — matches master CLI)
if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
echo "$line" >&2
elif [ "$VERBOSE" = "3" ]; then
echo "$line" >&2
fi
fi
done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
if [ -z "$RET" ]; then
alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
return 1
fi
return "$RET"
}
# Print the command-line help on stdout (defaults expanded from the current
# environment).
usage() {
echo "Usage:"
echo " $0 [options]*"
echo ""
echo " Trigger a reload from the master socket"
echo " Require socat"
echo " EXPERIMENTAL script!"
echo ""
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
echo " -d, --debug Debug mode, set -x"
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
echo " -s, --silent Silent mode (no output)"
echo " -v, --verbose Verbose output (output from haproxy on failure)"
echo " -vv Even more verbose output (output from haproxy on success and failure)"
echo " -h, --help This help"
echo ""
echo "Examples:"
echo " $0 -S ${MASTER_SOCKET} -d ${TIMEOUT}"
}
# Parse command-line options and set the globals consumed by reload().
main() {
while [ -n "$1" ]; do
case "$1" in
-S|--master-socket)
MASTER_SOCKET="$2"
shift 2
;;
-t|--timeout)
TIMEOUT="$2"
shift 2
;;
-s|--silent)
VERBOSE=0
shift
;;
-v|--verbose)
VERBOSE=2
shift
;;
-vv)
# BUGFIX: this arm used to be "-vv|--verbose", but "--verbose" can
# never reach it: the "-v|--verbose" arm above always matches first,
# so the alternative was dead. Only "-vv" selects level 3.
VERBOSE=3
shift
;;
-d|--debug)
DEBUG=1
shift
;;
-h|--help)
usage "$@"
exit 0
;;
*)
echo "[ALERT] ($$) : Unknown option '$1'" >&2
usage "$@"
exit 1
;;
esac
done
if [ -n "$DEBUG" ]; then
set -x
fi
}
# Parse the options, then trigger the reload; the script's exit status is the
# reload status (0 on success).
main "$@"
reload

View File

@ -123,22 +123,6 @@ struct url_stat {
#define FILT2_PRESERVE_QUERY 0x02 #define FILT2_PRESERVE_QUERY 0x02
#define FILT2_EXTRACT_CAPTURE 0x04 #define FILT2_EXTRACT_CAPTURE 0x04
#define FILT_OUTPUT_FMT (FILT_COUNT_ONLY| \
FILT_COUNT_STATUS| \
FILT_COUNT_SRV_STATUS| \
FILT_COUNT_COOK_CODES| \
FILT_COUNT_TERM_CODES| \
FILT_COUNT_URL_ONLY| \
FILT_COUNT_URL_COUNT| \
FILT_COUNT_URL_ERR| \
FILT_COUNT_URL_TAVG| \
FILT_COUNT_URL_TTOT| \
FILT_COUNT_URL_TAVGO| \
FILT_COUNT_URL_TTOTO| \
FILT_COUNT_URL_BAVG| \
FILT_COUNT_URL_BTOT| \
FILT_COUNT_IP_COUNT)
unsigned int filter = 0; unsigned int filter = 0;
unsigned int filter2 = 0; unsigned int filter2 = 0;
unsigned int filter_invert = 0; unsigned int filter_invert = 0;
@ -208,7 +192,7 @@ void help()
" you can also use -n to start from earlier then field %d\n" " you can also use -n to start from earlier then field %d\n"
" -query preserve the query string for per-URL (-u*) statistics\n" " -query preserve the query string for per-URL (-u*) statistics\n"
"\n" "\n"
"Output format - **only one** may be used at a time\n" "Output format - only one may be used at a time\n"
" -c only report the number of lines that would have been printed\n" " -c only report the number of lines that would have been printed\n"
" -pct output connect and response times percentiles\n" " -pct output connect and response times percentiles\n"
" -st output number of requests per HTTP status code\n" " -st output number of requests per HTTP status code\n"
@ -914,9 +898,6 @@ int main(int argc, char **argv)
if (!filter && !filter2) if (!filter && !filter2)
die("No action specified.\n"); die("No action specified.\n");
if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
die("Please, set only one output filter.\n");
if (filter & FILT_ACC_COUNT && !filter_acc_count) if (filter & FILT_ACC_COUNT && !filter_acc_count)
filter_acc_count=1; filter_acc_count=1;
@ -1571,10 +1552,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
if (!srv_node) { if (!srv_node) {
/* server not yet in the tree, let's create it */ /* server not yet in the tree, let's create it */
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1); srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
if (unlikely(!srv)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
srv_node = &srv->node; srv_node = &srv->node;
memcpy(&srv_node->key, b, e - b); memcpy(&srv_node->key, b, e - b);
srv_node->key[e - b] = '\0'; srv_node->key[e - b] = '\0';
@ -1684,10 +1661,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
*/ */
if (unlikely(!ustat)) if (unlikely(!ustat))
ustat = calloc(1, sizeof(*ustat)); ustat = calloc(1, sizeof(*ustat));
if (unlikely(!ustat)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
ustat->nb_err = err; ustat->nb_err = err;
ustat->nb_req = 1; ustat->nb_req = 1;

View File

@ -7,21 +7,6 @@ the queue.
## Requirements ## Requirements
- Python 3.x - Python 3.x
- [lxml](https://lxml.de/installation.html) - [lxml](https://lxml.de/installation.html)
- requests
- urllib3
## Installation
It can be easily installed with venv from python3
$ python3 -m venv ~/.local/venvs/stable-bot/
$ source ~/.local/venvs/stable-bot/bin/activate
$ pip install -r requirements.txt
And can be executed with:
$ ~/.local/venvs/stable-bot/bin/python release-estimator.py
## Usage ## Usage

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python3 #!/usr/bin/python3
# #
# Release estimator for HAProxy # Release estimator for HAProxy
# #
@ -16,7 +16,6 @@
# #
from lxml import html from lxml import html
from urllib.parse import urljoin
import requests import requests
import traceback import traceback
import smtplib import smtplib
@ -191,7 +190,6 @@ This is a friendly bot that watches fixes pending for the next haproxy-stable re
# parse out the CHANGELOG link # parse out the CHANGELOG link
CHANGELOG = tree.xpath('//a[contains(@href,"CHANGELOG")]/@href')[0] CHANGELOG = tree.xpath('//a[contains(@href,"CHANGELOG")]/@href')[0]
CHANGELOG = urljoin("https://", CHANGELOG)
last_version = tree.xpath('//td[contains(text(), "last")]/../td/a/text()')[0] last_version = tree.xpath('//td[contains(text(), "last")]/../td/a/text()')[0]
first_version = "%s.0" % (version) first_version = "%s.0" % (version)

View File

@ -1,3 +0,0 @@
lxml
requests
urllib3

View File

@ -6,9 +6,9 @@ Wants=network-online.target
[Service] [Service]
EnvironmentFile=-/etc/default/haproxy EnvironmentFile=-/etc/default/haproxy
EnvironmentFile=-/etc/sysconfig/haproxy EnvironmentFile=-/etc/sysconfig/haproxy
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock" Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
ExecReload=/bin/kill -USR2 $MAINPID ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed KillMode=mixed
Restart=always Restart=always

View File

@ -1,34 +0,0 @@
// Semantic patch: ensure every calloc() result is checked against NULL.
// find calls to calloc
@call@
expression ptr;
position p;
@@
ptr@p = calloc(...);
// find ok calls to calloc
// (the result must reach a NULL/0 comparison without being used first)
@ok@
expression ptr;
position call.p;
@@
ptr@p = calloc(...);
... when != ptr
(
(ptr == NULL || ...)
|
(ptr == 0 || ...)
|
(ptr != NULL || ...)
|
(ptr != 0 || ...)
)
// fix bad calls to calloc
// NOTE(review): the inserted "return;" is a placeholder; adapt it to each
// function's real error handling when applying the patch.
@depends on !ok@
expression ptr;
position call.p;
@@
ptr@p = calloc(...);
+ if (ptr == NULL) return;

View File

@ -1,34 +0,0 @@
// Semantic patch: ensure every malloc() result is checked against NULL.
// find calls to malloc
@call@
expression ptr;
position p;
@@
ptr@p = malloc(...);
// find ok calls to malloc
// (the result must reach a NULL/0 comparison without being used first)
@ok@
expression ptr;
position call.p;
@@
ptr@p = malloc(...);
... when != ptr
(
(ptr == NULL || ...)
|
(ptr == 0 || ...)
|
(ptr != NULL || ...)
|
(ptr != 0 || ...)
)
// fix bad calls to malloc
// NOTE(review): the inserted "return;" is a placeholder; adapt it to each
// function's real error handling when applying the patch.
@depends on !ok@
expression ptr;
position call.p;
@@
ptr@p = malloc(...);
+ if (ptr == NULL) return;

View File

@ -1,34 +0,0 @@
// Semantic patch: ensure every strdup() result is checked against NULL.
// find calls to strdup
@call@
expression ptr;
position p;
@@
ptr@p = strdup(...);
// find ok calls to strdup
// (the result must reach a NULL/0 comparison without being used first)
@ok@
expression ptr;
position call.p;
@@
ptr@p = strdup(...);
... when != ptr
(
(ptr == NULL || ...)
|
(ptr == 0 || ...)
|
(ptr != NULL || ...)
|
(ptr != 0 || ...)
)
// fix bad calls to strdup
// NOTE(review): the inserted "return;" is a placeholder; adapt it to each
// function's real error handling when applying the patch.
@depends on !ok@
expression ptr;
position call.p;
@@
ptr@p = strdup(...);
+ if (ptr == NULL) return;

View File

@ -4,7 +4,6 @@
/* make the include files below expose their flags */ /* make the include files below expose their flags */
#define HA_EXPOSE_FLAGS #define HA_EXPOSE_FLAGS
#include <haproxy/applet-t.h>
#include <haproxy/channel-t.h> #include <haproxy/channel-t.h>
#include <haproxy/connection-t.h> #include <haproxy/connection-t.h>
#include <haproxy/fd-t.h> #include <haproxy/fd-t.h>
@ -13,10 +12,7 @@
#include <haproxy/mux_fcgi-t.h> #include <haproxy/mux_fcgi-t.h>
#include <haproxy/mux_h2-t.h> #include <haproxy/mux_h2-t.h>
#include <haproxy/mux_h1-t.h> #include <haproxy/mux_h1-t.h>
#include <haproxy/mux_quic-t.h>
#include <haproxy/mux_spop-t.h>
#include <haproxy/peers-t.h> #include <haproxy/peers-t.h>
#include <haproxy/quic_conn-t.h>
#include <haproxy/stconn-t.h> #include <haproxy/stconn-t.h>
#include <haproxy/stream-t.h> #include <haproxy/stream-t.h>
#include <haproxy/task-t.h> #include <haproxy/task-t.h>
@ -43,17 +39,11 @@
#define SHOW_AS_FSTRM 0x00040000 #define SHOW_AS_FSTRM 0x00040000
#define SHOW_AS_PEERS 0x00080000 #define SHOW_AS_PEERS 0x00080000
#define SHOW_AS_PEER 0x00100000 #define SHOW_AS_PEER 0x00100000
#define SHOW_AS_QC 0x00200000
#define SHOW_AS_SPOPC 0x00400000
#define SHOW_AS_SPOPS 0x00800000
#define SHOW_AS_QCC 0x01000000
#define SHOW_AS_QCS 0x02000000
#define SHOW_AS_APPCTX 0x04000000
// command line names, must be in exact same order as the SHOW_AS_* flags above // command line names, must be in exact same order as the SHOW_AS_* flags above
// so that show_as_words[i] matches flag 1U<<i. // so that show_as_words[i] matches flag 1U<<i.
const char *show_as_words[] = { "ana", "chn", "conn", "sc", "stet", "strm", "task", "txn", "sd", "hsl", "htx", "hmsg", "fd", "h2c", "h2s", "h1c", "h1s", "fconn", "fstrm", const char *show_as_words[] = { "ana", "chn", "conn", "sc", "stet", "strm", "task", "txn", "sd", "hsl", "htx", "hmsg", "fd", "h2c", "h2s", "h1c", "h1s", "fconn", "fstrm",
"peers", "peer", "qc", "spopc", "spops", "qcc", "qcs", "appctx"}; "peers", "peer"};
/* will be sufficient for even largest flag names */ /* will be sufficient for even largest flag names */
static char buf[4096]; static char buf[4096];
@ -168,12 +158,6 @@ int main(int argc, char **argv)
if (show_as & SHOW_AS_FSTRM) printf("fstrm->flags = %s\n",(fstrm_show_flags (buf, bsz, " | ", flags), buf)); if (show_as & SHOW_AS_FSTRM) printf("fstrm->flags = %s\n",(fstrm_show_flags (buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_PEERS) printf("peers->flags = %s\n",(peers_show_flags (buf, bsz, " | ", flags), buf)); if (show_as & SHOW_AS_PEERS) printf("peers->flags = %s\n",(peers_show_flags (buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_PEER) printf("peer->flags = %s\n", (peer_show_flags (buf, bsz, " | ", flags), buf)); if (show_as & SHOW_AS_PEER) printf("peer->flags = %s\n", (peer_show_flags (buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_QC) printf("qc->flags = %s\n", (qc_show_flags (buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_SPOPC) printf("spopc->flags = %s\n",(spop_conn_show_flags(buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_SPOPS) printf("spops->flags = %s\n",(spop_strm_show_flags(buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_QCC) printf("qcc->flags = %s\n", (qcc_show_flags (buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_QCS) printf("qcs->flags = %s\n", (qcs_show_flags (buf, bsz, " | ", flags), buf));
if (show_as & SHOW_AS_APPCTX) printf("appctx->flags = %s\n", (appctx_show_flags(buf, bsz, " | ", flags), buf));
} }
return 0; return 0;
} }

View File

@ -1,2 +1,2 @@
#!/bin/sh #!/bin/sh
grep -o 'cflg=[0-9a-fx]*' | sort | uniq -c | sort -nr | while read a b; do c=${b##*=}; d=$(${0%/*}/flags conn $c);d=${d##*= }; printf "%6d %s %s\n" $a "$b" "$d";done awk '{print $12}' | grep cflg= | sort | uniq -c | sort -nr | while read a b; do c=${b##*=}; d=$(${0%/*}/flags conn $c);d=${d##*= }; printf "%6d %s %s\n" $a "$b" "$d";done

View File

@ -195,7 +195,7 @@ while read -r; do
! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]] || append_flag b.h2c.flg h2c "${BASH_REMATCH[1]}" ! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]] || append_flag b.h2c.flg h2c "${BASH_REMATCH[1]}"
elif [ $ctx = cob ]; then elif [ $ctx = cob ]; then
! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]] || append_flag b.co.flg conn "${BASH_REMATCH[1]}" ! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]] || append_flag b.co.flg conn "${BASH_REMATCH[1]}"
! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd 0x"${BASH_REMATCH[1]}" ! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd "${BASH_REMATCH[1]}"
elif [ $ctx = res ]; then elif [ $ctx = res ]; then
! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]] || append_flag res.flg chn "${BASH_REMATCH[1]}" ! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]] || append_flag res.flg chn "${BASH_REMATCH[1]}"
! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]] || append_flag res.ana ana "${BASH_REMATCH[1]}" ! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]] || append_flag res.ana ana "${BASH_REMATCH[1]}"

View File

@ -1,118 +0,0 @@
# GDB helper macros to walk HAProxy's ebtrees (and eb32sc trees) in a core.
# Pointers carry a tag in bit 0 (0=leaf side, 1=node side); results are
# returned through the convenience variables $node and $tag.
# sets $tag and $node from $arg0, for internal use only
define _ebtree_set_tag_node
set $tag = (unsigned long)$arg0 & 0x1
set $node = (unsigned long)$arg0 & 0xfffffffffffffffe
set $node = (struct eb_node *)$node
end
# get root from any node (leaf of node), returns in $node
define ebtree_root
set $node = (struct eb_root *)$arg0->node_p
if $node == 0
# sole node
set $node = (struct eb_root *)$arg0->leaf_p
end
# walk up
while 1
_ebtree_set_tag_node $node
if $node->branches.b[1] == 0
break
end
set $node = $node->node_p
end
# root returned in $node
end
# returns $node filled with the first node of ebroot $arg0
define ebtree_first
# browse ebtree left until encountering leaf
set $node = (struct eb_node *)$arg0->b[0]
while 1
_ebtree_set_tag_node $node
if $tag == 0
loop_break
end
set $node = (struct eb_root *)$node->branches.b[0]
end
# extract last node
_ebtree_set_tag_node $node
end
# finds next ebtree node after $arg0, and returns it in $node
define ebtree_next
# get parent
set $node = (struct eb_root *)$arg0->leaf_p
# Walking up from right branch, so we cannot be below root
# while (eb_gettag(t) != EB_LEFT) // #define EB_LEFT 0
while 1
_ebtree_set_tag_node $node
if $tag == 0
loop_break
end
set $node = (struct eb_root *)$node->node_p
end
set $node = (struct eb_root *)$node->branches.b[1]
# walk down (left side => 0)
# while (eb_gettag(start) == EB_NODE) // #define EB_NODE 1
while 1
_ebtree_set_tag_node $node
if $node == 0
loop_break
end
if $tag != 1
loop_break
end
set $node = (struct eb_root *)$node->branches.b[0]
end
end
# sets $tag and $node from $arg0, for internal use only
define _ebsctree_set_tag_node
set $tag = (unsigned long)$arg0 & 0x1
set $node = (unsigned long)$arg0 & 0xfffffffffffffffe
set $node = (struct eb32sc_node *)$node
end
# returns $node filled with the first node of ebroot $arg0
define ebsctree_first
# browse ebsctree left until encountering leaf
set $node = (struct eb32sc_node *)$arg0->b[0]
while 1
_ebsctree_set_tag_node $node
if $tag == 0
loop_break
end
set $node = (struct eb_root *)$node->branches.b[0]
end
# extract last node
_ebsctree_set_tag_node $node
end
# finds next ebtree node after $arg0, and returns it in $node
define ebsctree_next
# get parent
set $node = (struct eb_root *)$arg0->node.leaf_p
# Walking up from right branch, so we cannot be below root
# while (eb_gettag(t) != EB_LEFT) // #define EB_LEFT 0
while 1
_ebsctree_set_tag_node $node
if $tag == 0
loop_break
end
set $node = (struct eb_root *)$node->node.node_p
end
set $node = (struct eb_root *)$node->node.branches.b[1]
# walk down (left side => 0)
# while (eb_gettag(start) == EB_NODE) // #define EB_NODE 1
while 1
_ebsctree_set_tag_node $node
if $node == 0
loop_break
end
if $tag != 1
loop_break
end
set $node = (struct eb_root *)$node->node.branches.b[0]
end
end

View File

@ -1,26 +0,0 @@
# lists entries starting at list head $arg0
# (follows the first word of each element as a "next" pointer, printing each
#  address; stops when wrapping back to the head or on a NULL corruption)
define list_dump
set $h = $arg0
set $p = *(void **)$h
while ($p != $h)
printf "%#lx\n", $p
if ($p == 0)
loop_break
end
set $p = *(void **)$p
end
end
# list all entries starting at list head $arg0 until meeting $arg1
# (same walk as list_dump, but also stops after printing the searched
#  element $arg1, which is useful to confirm membership)
define list_find
set $h = $arg0
set $k = $arg1
set $p = *(void **)$h
while ($p != $h)
printf "%#lx\n", $p
if ($p == 0 || $p == $k)
loop_break
end
set $p = *(void **)$p
end
end

View File

@ -1,19 +0,0 @@
# show non-null memprofile entries with method, alloc/free counts/tot and caller
define memprof_dump
set $i = 0
# short method names indexed by memprof_stats[].method
set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
set $m = memprof_stats[$i].method
printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
# print the caller address with its symbol
output/a memprof_stats[$i].caller
printf "\n"
end
set $i = $i + 1
end
end

View File

@ -1,21 +0,0 @@
# dump pool contents (2.9 and above, with buckets)
# Walks the global pools list ($po, set by pm_init) and, for each pool_head,
# sums allocated/used across all buckets before printing one summary line.
define pools_dump
set $h = $po
set $p = *(void **)$h
while ($p != $h)
# container_of(list element -> struct pool_head)
set $e = (struct pool_head *)(((char *)$p) - (unsigned long)&((struct pool_head *)0)->list)
set $total = 0
set $used = 0
set $idx = 0
while $idx < sizeof($e->buckets) / sizeof($e->buckets[0])
set $total=$total + $e->buckets[$idx].allocated
set $used=$used + $e->buckets[$idx].used
set $idx=$idx + 1
end
set $mem = $total * $e->size
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%u\n", $p, $e, $e->name, $e->size, $total, $used, $mem
set $p = *(void **)$p
end
end

View File

@ -1,47 +0,0 @@
# This script will set the post_mortem struct pointer ($pm) from the one found
# in the "post_mortem" symbol. If not found or if not correct, it's the same
# address as the "_post_mortem" section, which can be found using "info files"
# or "objdump -h" on the executable. The guessed value is set by a first call
# to pm_init, but if not correct, you just need to call pm_init again with the
# correct pointer, e.g:
#    pm_init 0xcfd400
define pm_init
set $pm = (struct post_mortem*)$arg0
# shortcut variables to the most commonly inspected global structures
set $g = $pm.global
set $ti = $pm.thread_info
set $tc = $pm.thread_ctx
set $tgi = $pm.tgroup_info
set $tgc = $pm.tgroup_ctx
set $fd = $pm.fdtab
set $pxh = *$pm.proxies
set $po = $pm.pools
set $ac = $pm.activity
end
# show basic info on the running process (OS, uid, etc)
define pm_show_info
print $pm->platform
print $pm->process
end
# show thread IDs to easily map between gdb threads and tid
define pm_show_threads
set $t = 0
while $t < $g.nbthread
printf "Tid %4d: pthread_id=%#lx stack_top=%#lx\n", $t, $ti[$t].pth_id, $ti[$t].stack_top
set $t = $t + 1
end
end
# dump all threads' dump buffers
define pm_show_thread_dump
set $t = 0
while $t < $g.nbthread
printf "%s\n", $tc[$t].thread_dump_buffer->area
set $t = $t + 1
end
end
# initialize the various pointers
pm_init &post_mortem

View File

@ -1,25 +0,0 @@
# list proxies starting with the one in argument (typically $pxh)
# For each proxy, prints its capabilities (LB/FE/BE per the cap bits tested
# below) plus frontend and/or backend counters as applicable.
define px_list
set $p = (struct proxy *)$arg0
while ($p != 0)
printf "%p (", $p
if $p->cap & 0x10
printf "LB,"
end
if $p->cap & 0x1
printf "FE,"
end
if $p->cap & 0x2
printf "BE,"
end
printf "%s)", $p->id
if $p->cap & 0x1
printf " feconn=%u cmax=%u cum_conn=%llu cpsmax=%u", $p->feconn, $p->fe_counters.conn_max, $p->fe_counters.cum_conn, $p->fe_counters.cps_max
end
if $p->cap & 0x2
printf " beconn=%u served=%u queued=%u qmax=%u cum_sess=%llu wact=%u", $p->beconn, $p->served, $p->queue.length, $p->be_counters.nbpend_max, $p->be_counters.cum_sess, $p->lbprm.tot_wact
end
printf "\n"
set $p = ($p)->next
end
end

View File

@ -1,9 +0,0 @@
# list servers in a proxy whose pointer is passed in argument
# (one line per server: address, id, connection/session counters and the
#  current->next state transition)
define px_list_srv
set $h = (struct proxy *)$arg0
set $p = ($h)->srv
while ($p != 0)
printf "%#lx %s maxconn=%u cur_sess=%u max_sess=%u served=%u queued=%u st=%u->%u ew=%u sps_max=%u\n", $p, $p->id, $p->maxconn, $p->cur_sess, $p->counters.cur_sess_max, $p->served, $p->queue.length, $p->cur_state, $p->next_state, $p->cur_eweight, $p->counters.sps_max
set $p = ($p)->next
end
end

View File

@ -1,18 +0,0 @@
# list all streams for all threads
# Iterates each thread context's stream list and prints one line per stream
# (container_of from the list element back to the struct stream).
define stream_dump
set $t = 0
while $t < $g.nbthread
set $h = &$tc[$t].streams
printf "Tid %4d: &streams=%p\n", $t, $h
set $p = *(void **)$h
while ($p != $h)
set $s = (struct stream *)(((char *)$p) - (unsigned long)&((struct stream *)0)->list)
printf " &list=%#lx strm=%p uid=%u strm.fe=%s strm.flg=%#x strm.list={n=%p,p=%p}\n", $p, $s, $s->uniq_id, $s->sess->fe->id, $s->flags, $s->list.n, $s->list.p
if ($p == 0)
loop_break
end
set $p = *(void **)$p
end
set $t = $t + 1
end
end

View File

@ -1,247 +0,0 @@
-- This is an HTTP/2 tracer for a TCP proxy. It will decode the frames that are
-- exchanged between the client and the server and indicate their direction,
-- types, flags and lengths. Lines are prefixed with a connection number modulo
-- 4096 that allows to sort out multiplexed exchanges. In order to use this,
-- simply load this file in the global section and use it from a TCP proxy:
--
-- global
-- lua-load "dev/h2/h2-tracer.lua"
--
-- listen h2_sniffer
-- mode tcp
-- bind :8002
-- filter lua.h2-tracer #hex
-- server s1 127.0.0.1:8003
--
-- define the decoder's class here
Dec = {}
Dec.id = "Lua H2 tracer"
Dec.flags = 0
Dec.__index = Dec
Dec.args = {} -- args passed by the filter's declaration
Dec.cid = 0 -- next connection ID
-- prefix to indent responses
res_pfx = " | "
-- H2 frame types
h2ft = {
[0] = "DATA",
[1] = "HEADERS",
[2] = "PRIORITY",
[3] = "RST_STREAM",
[4] = "SETTINGS",
[5] = "PUSH_PROMISE",
[6] = "PING",
[7] = "GOAWAY",
[8] = "WINDOW_UPDATE",
[9] = "CONTINUATION",
}
-- H2 frame flag names, indexed by frame type then by flag bit position
h2ff = {
[0] = { [0] = "ES", [3] = "PADDED" }, -- data
[1] = { [0] = "ES", [2] = "EH", [3] = "PADDED", [5] = "PRIORITY" }, -- headers
[2] = { }, -- priority
[3] = { }, -- rst_stream
[4] = { [0] = "ACK" }, -- settings
[5] = { [2] = "EH", [3] = "PADDED" }, -- push_promise
[6] = { [0] = "ACK" }, -- ping
[7] = { }, -- goaway
[8] = { }, -- window_update
[9] = { [2] = "EH" }, -- continuation
}
-- Instantiate one tracer per filtered stream: allocates a connection ID and
-- a fresh per-direction parser state, and latches the "hex" option from the
-- filter declaration arguments.
function Dec:new()
local dec = {}
setmetatable(dec, Dec)
dec.do_hex = false
if (Dec.args[1] == "hex") then
dec.do_hex = true
end
Dec.cid = Dec.cid+1
-- mix the thread number when multithreading.
dec.cid = Dec.cid + 64 * core.thread
-- state per dir. [1]=req [2]=res
-- hdr:  the 9 raw bytes of the current frame header
-- fofs: number of header bytes received so far
-- flen: payload bytes still expected for the current frame
-- ftyp/fflg/sid: decoded type, flags and stream-id of the current frame
-- tot:  total bytes seen in this direction
dec.st = {
[1] = {
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
fofs = 0,
flen = 0,
ftyp = 0,
fflg = 0,
sid = 0,
tot = 0,
},
[2] = {
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
fofs = 0,
flen = 0,
ftyp = 0,
fflg = 0,
sid = 0,
tot = 0,
},
}
return dec
end
-- Announce the start of a request or response channel and subscribe this
-- filter instance to the channel's data events.
function Dec:start_analyze(txn, chn)
	local conn_tag = string.format("[%03x] ", self.cid % 4096)
	if chn:is_resp() then
		io.write(conn_tag .. res_pfx .. "### res start\n")
	else
		io.write(conn_tag .. "### req start\n")
	end
	filter.register_data_filter(self, chn)
end
-- Announce the end of a channel along with the byte total accumulated by
-- tcp_payload() for that direction ([1]=request, [2]=response).
function Dec:end_analyze(txn, chn)
	local conn_tag = string.format("[%03x] ", self.cid % 4096)
	if chn:is_resp() then
		io.write(conn_tag .. res_pfx .. "### res end: " .. self.st[2].tot .. " bytes total\n")
	else
		io.write(conn_tag .. "### req end: " ..self.st[1].tot.. " bytes total\n")
	end
end
-- Data callback: incrementally parse the bytes of one direction as H2 frames
-- (optionally hex-dumping them) and print one line per frame header. Frame
-- parsing state survives across calls in self.st[dir] so frames split over
-- several invocations are handled.
function Dec:tcp_payload(txn, chn)
local data = { }
local dofs = 1
local pfx = ""
local dir = 1
local sofs = 0
local ft = ""
local ff = ""
if chn:is_resp() then
pfx = res_pfx
dir = 2
end
pfx = string.format("[%03x] ", self.cid % 4096) .. pfx
-- stream offset before processing
sofs = self.st[dir].tot
if (chn:input() > 0) then
data = chn:data()
self.st[dir].tot = self.st[dir].tot + chn:input()
end
if (chn:input() > 0 and self.do_hex ~= false) then
io.write("\n" .. pfx .. "Hex:\n")
for i = 1, #data do
-- 8 bytes per output line
if ((i & 7) == 1) then io.write(pfx) end
io.write(string.format("0x%02x ", data:sub(i, i):byte()))
if ((i & 7) == 0 or i == #data) then io.write("\n") end
end
end
-- start at byte 1 in the <data> string
dofs = 1
-- the first 24 bytes are expected to be an H2 preface on the request
if (dir == 1 and sofs < 24) then
-- let's not check it for now
-- NOTE(review): <bytes> is computed but never used, and the two
-- expressions below mix old total (sofs) and new total (tot) in a way
-- that looks off when the preface arrives split across calls — confirm.
local bytes = self.st[dir].tot - sofs
if (sofs + self.st[dir].tot >= 24) then
-- skip what was missing from the preface
dofs = dofs + 24 - sofs
sofs = 24
io.write(pfx .. "[PREFACE len=24]\n")
else
-- consume more preface bytes
sofs = sofs + self.st[dir].tot
return
end
end
-- parse contents as long as there are pending data
while true do
-- check if we need to consume data from the current frame
-- flen is the number of bytes left before the frame's end.
if (self.st[dir].flen > 0) then
if dofs > #data then return end -- missing data
if (#data - dofs + 1 < self.st[dir].flen) then
-- insufficient data
self.st[dir].flen = self.st[dir].flen - (#data - dofs + 1)
io.write(pfx .. string.format("%32s\n", "... -" .. (#data - dofs + 1) .. " = " .. self.st[dir].flen))
dofs = #data + 1
return
else
-- enough data to finish
if (dofs == 1) then
-- only print a partial size if the frame was interrupted
io.write(pfx .. string.format("%32s\n", "... -" .. self.st[dir].flen .. " = 0"))
end
dofs = dofs + self.st[dir].flen
self.st[dir].flen = 0
end
end
-- here, flen = 0, we're at the beginning of a new frame --
-- read possibly missing header bytes until dec.fofs == 9
while self.st[dir].fofs < 9 do
if dofs > #data then return end -- missing data
self.st[dir].hdr[self.st[dir].fofs + 1] = data:sub(dofs, dofs):byte()
dofs = dofs + 1
self.st[dir].fofs = self.st[dir].fofs + 1
end
-- we have a full frame header here
if (self.do_hex ~= false) then
io.write("\n" .. pfx .. string.format("hdr=%02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
self.st[dir].hdr[1], self.st[dir].hdr[2], self.st[dir].hdr[3],
self.st[dir].hdr[4], self.st[dir].hdr[5], self.st[dir].hdr[6],
self.st[dir].hdr[7], self.st[dir].hdr[8], self.st[dir].hdr[9]))
end
-- we have a full frame header, we'll be ready
-- for a new frame once the data is gone
-- decode the 9-byte header: 24-bit length, type, flags, 31-bit sid
self.st[dir].flen = self.st[dir].hdr[1] * 65536 +
self.st[dir].hdr[2] * 256 +
self.st[dir].hdr[3]
self.st[dir].ftyp = self.st[dir].hdr[4]
self.st[dir].fflg = self.st[dir].hdr[5]
self.st[dir].sid = self.st[dir].hdr[6] * 16777216 +
self.st[dir].hdr[7] * 65536 +
self.st[dir].hdr[8] * 256 +
self.st[dir].hdr[9]
self.st[dir].fofs = 0
-- decode frame type
if self.st[dir].ftyp <= 9 then
ft = h2ft[self.st[dir].ftyp]
else
ft = string.format("TYPE_0x%02x\n", self.st[dir].ftyp)
end
-- decode frame flags for frame type <ftyp>
ff = ""
for i = 7, 0, -1 do
if (((self.st[dir].fflg >> i) & 1) ~= 0) then
if self.st[dir].ftyp <= 9 and h2ff[self.st[dir].ftyp][i] ~= nil then
ff = ff .. ((ff == "") and "" or "+")
ff = ff .. h2ff[self.st[dir].ftyp][i]
else
-- unknown flag bit: print it in hex
ff = ff .. ((ff == "") and "" or "+")
ff = ff .. string.format("0x%02x", 1<<i)
end
end
end
io.write(pfx .. string.format("[%s %ssid=%u len=%u (bytes=%u)]\n",
ft, (ff == "") and "" or ff .. " ",
self.st[dir].sid, self.st[dir].flen,
(#data - dofs + 1)))
end
end
-- Register the tracer as a stream filter named "h2-tracer"; the arguments
-- from the "filter lua.h2-tracer ..." config line (e.g. "hex") are stashed
-- on the class for Dec:new() to consume.
core.register_filter("h2-tracer", Dec, function(dec, args)
Dec.args = args
return dec
end)

View File

@ -59,9 +59,9 @@ struct ring_v2 {
struct ring_v2a { struct ring_v2a {
size_t size; // storage size size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps) size_t rsvd; // header length (used for file-backed maps)
size_t tail ALIGNED(64); // storage tail size_t tail __attribute__((aligned(64))); // storage tail
size_t head ALIGNED(64); // storage head size_t head __attribute__((aligned(64))); // storage head
char area[0] ALIGNED(64); // storage area begins immediately here char area[0] __attribute__((aligned(64))); // storage area begins immediately here
}; };
/* display the message and exit with the code */ /* display the message and exit with the code */

View File

@ -1,31 +0,0 @@
# Build the NCPU spoofing shim in two forms: a shared object (ncpu.so) meant
# to be used via LD_PRELOAD, and a standalone executable (ncpu) obtained by
# patching the shared object so it can be run directly.
include ../../include/make/verbose.mk
CC = cc
OPTIMIZE = -O2 -g
DEFINE =
INCLUDE =
OBJS = ncpu.so ncpu
OBJDUMP = objdump

all: $(OBJS)

# Compile position-independent objects suitable for a shared library.
%.o: %.c
	$(cmd_CC) $(OPTIMIZE) $(DEFINE) $(INCLUDE) -shared -fPIC -c -o $@ $^

# Link the object as a PIE, then drop the intermediate object.
%.so: %.o
	$(cmd_CC) -pie -o $@ $^
	$(Q)rm -f $^

# Turn the shared object into a directly executable binary: locate the
# .dynamic section with objdump, split the file around it with dd, then use
# sed to zero what appears to be the DF_1_PIE bit (0x08000000) of the
# DT_FLAGS_1 (0x6ffffffb) entry -- NOTE(review): byte-pattern meaning
# inferred from the ELF constants, confirm on the target toolchain -- and
# finally reassemble the pieces and mark the result executable.
%: %.so
	$(call qinfo, PATCHING)set -- $$($(OBJDUMP) -j .dynamic -h $^ | fgrep .dynamic); \
	ofs=$$6; size=$$3; \
	dd status=none bs=1 count=$$((0x$$ofs)) if=$^ of=$^-p1; \
	dd status=none bs=1 skip=$$((0x$$ofs)) count=$$((0x$$size)) if=$^ of=$^-p2; \
	dd status=none bs=1 skip=$$((0x$$ofs+0x$$size)) if=$^ of=$^-p3; \
	sed -e 's,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x08,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x00,g' < $^-p2 > $^-p2-patched; \
	cat $^-p1 $^-p2-patched $^-p3 > "$@"
	$(Q)rm -f $^-p*
	$(Q)chmod 755 "$@"

clean:
	rm -f $(OBJS) *.[oas] *.so-* *~

View File

@ -1,136 +0,0 @@
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
// gcc -fPIC -shared -O2 -o ncpu{.so,.c}
// NCPU=16 LD_PRELOAD=$PWD/ncpu.so command args...
static char prog_full_path[PATH_MAX];
/* Interposed sysconf(): report $NCPU (clamped to [0, CPU_SETSIZE], defaulting
 * to CPU_SETSIZE when unset or out of range) for the processor-count queries.
 * Any other query fails with EINVAL, exactly like the original shim.
 */
long sysconf(int name)
{
	const char *env;
	long count;

	if (name != _SC_NPROCESSORS_ONLN && name != _SC_NPROCESSORS_CONF) {
		errno = EINVAL;
		return -1;
	}

	env = getenv("NCPU");
	count = env ? atoi(env) : CPU_SETSIZE;
	if (count < 0 || count > CPU_SETSIZE)
		count = CPU_SETSIZE;
	return count;
}
/* Interposed sched_getaffinity(): fill <mask> with the first $NCPU CPUs set
 * (clamped to [0, CPU_SETSIZE], defaulting to CPU_SETSIZE when the variable
 * is unset or out of range). <pid> is ignored; always returns 0.
 */
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask)
{
	const char *env = getenv("NCPU");
	int count = env ? atoi(env) : CPU_SETSIZE;
	int cpu;

	if (count < 0 || count > CPU_SETSIZE)
		count = CPU_SETSIZE;

	CPU_ZERO_S(cpusetsize, mask);
	for (cpu = 0; cpu < count; cpu++)
		CPU_SET_S(cpu, cpusetsize, mask);
	return 0;
}
/* Interposed sched_setaffinity(): silently ignore the operation and report
 * success, so that wrapped programs believe their CPU-pinning requests
 * worked even though the affinity is never actually changed.
 */
int sched_setaffinity(pid_t pid, size_t cpusetsize, const cpu_set_t *mask)
{
	return 0;
}
/* Print the wrapper-mode command-line help on stderr, showing <argv0> as the
 * program name and CPU_SETSIZE as the default CPU count, then exit with
 * status 1. Never returns.
 */
void usage(const char *argv0)
{
	fprintf(stderr,
	        "Usage: %s [-n ncpu] [cmd [args...]]\n"
	        "  Will install itself in LD_PRELOAD before calling <cmd> with args.\n"
	        "  The number of CPUs may also come from variable NCPU or default to %d.\n"
	        "\n"
	        "",
	        argv0, CPU_SETSIZE);
	exit(1);
}
/* Called in wrapper mode, no longer supported on recent glibc.
 * Usage: ncpu [-n ncpu] [--] cmd [args...]
 * Resolves our own path, exports NCPU when -n is given, appends ourselves to
 * LD_PRELOAD, then executes <cmd>. Only returns on error (exit code 1 or 2).
 */
int main(int argc, char **argv)
{
	const char *argv0 = argv[0];
	char *preload;
	int plen;

	/* resolve our own absolute path so LD_PRELOAD works from any cwd */
	prog_full_path[0] = 0;
	plen = readlink("/proc/self/exe", prog_full_path, sizeof(prog_full_path) - 1);
	if (plen != -1)
		prog_full_path[plen] = 0;
	else {
		/* fall back to argv[0]; snprintf() returns the untruncated
		 * length, so clamp it to what the buffer really holds before
		 * it is used as a copy length below.
		 */
		plen = snprintf(prog_full_path, sizeof(prog_full_path), "%s", argv[0]);
		if (plen >= (int)sizeof(prog_full_path))
			plen = sizeof(prog_full_path) - 1;
	}

	while (1) {
		argc--;
		argv++;
		if (argc < 1)
			usage(argv0);

		if (strcmp(argv[0], "--") == 0) {
			argc--;
			argv++;
			break;
		}
		else if (strcmp(argv[0], "-n") == 0) {
			if (argc < 2)
				usage(argv0);
			if (setenv("NCPU", argv[1], 1) != 0)
				usage(argv0);
			argc--;
			argv++;
		}
		else {
			/* unknown arg, that's the command */
			break;
		}
	}

	/* here the only args left start with the cmd name */

	/* now we'll concatenate ourselves at the end of the LD_PRELOAD
	 * variable. The pointer returned by getenv() refers to the
	 * environment, not to heap memory, so it must never be passed to
	 * realloc(); build a fresh buffer instead.
	 */
	preload = getenv("LD_PRELOAD");
	if (preload) {
		int olen = strlen(preload);
		char *merged = malloc(olen + 1 + plen + 1);

		if (!merged) {
			perror("malloc");
			exit(2);
		}
		memcpy(merged, preload, olen);
		merged[olen] = ' ';
		memcpy(merged + olen + 1, prog_full_path, plen);
		merged[olen + 1 + plen] = 0;
		preload = merged;
	}
	else {
		preload = prog_full_path;
	}

	if (setenv("LD_PRELOAD", preload, 1) < 0) {
		perror("setenv");
		exit(2);
	}

	execvp(*argv, argv);
	perror("execvp"); /* was "execve", but the call above is execvp() */
	exit(2);
}

View File

@ -14,11 +14,11 @@ that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is is created with a new, higher version. The current development branch is
3.1-dev, and maintenance branches are 3.0 and below. 3.0-dev, and maintenance branches are 2.9 and below.
Fixes created in the development branch for issues that were introduced in an Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.0 first, then 2.9, then 2.8 and so that branch that introduced the issue: 2.9 first, then 2.8, then 2.7 and so
on. This operation is called "backporting". A fix for an issue is never on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance that the project maintainers really aim at zero regression in maintenance

View File

@ -17,7 +17,7 @@ Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among: where X is a single word among:
- "yes", if you recommend to backport the patch right now either because - "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.2 or lower); a maintenance branch (2.9 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but - "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time. only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g. - "no", if nothing clearly indicates a necessity to backport this patch (e.g.

View File

@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.0 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:

View File

@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.2-dev, and maintenance branches are 3.1 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.1 first, then 3.0, then 2.9, then 2.8
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.

View File

@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.1 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:

View File

@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.3-dev, and maintenance branches are 3.2 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.

View File

@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.4-dev, and maintenance branches are 3.3 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.

View File

@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.3 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:

View File

@ -150,14 +150,11 @@ function updt_table(line) {
var w = document.getElementById("sh_w").checked; var w = document.getElementById("sh_w").checked;
var y = document.getElementById("sh_y").checked; var y = document.getElementById("sh_y").checked;
var tn = 0, tu = 0, tw = 0, ty = 0; var tn = 0, tu = 0, tw = 0, ty = 0;
var bn = 0, bu = 0, bw = 0, by = 0;
var i, el; var i, el;
for (i = 1; i < nb_patches; i++) { for (i = 1; i < nb_patches; i++) {
if (document.getElementById("bt_" + i + "_n").checked) { if (document.getElementById("bt_" + i + "_n").checked) {
tn++; tn++;
if (bkp[i])
bn++;
if (line && i != line) if (line && i != line)
continue; continue;
el = document.getElementById("tr_" + i); el = document.getElementById("tr_" + i);
@ -166,8 +163,6 @@ function updt_table(line) {
} }
else if (document.getElementById("bt_" + i + "_u").checked) { else if (document.getElementById("bt_" + i + "_u").checked) {
tu++; tu++;
if (bkp[i])
bu++;
if (line && i != line) if (line && i != line)
continue; continue;
el = document.getElementById("tr_" + i); el = document.getElementById("tr_" + i);
@ -176,8 +171,6 @@ function updt_table(line) {
} }
else if (document.getElementById("bt_" + i + "_w").checked) { else if (document.getElementById("bt_" + i + "_w").checked) {
tw++; tw++;
if (bkp[i])
bw++;
if (line && i != line) if (line && i != line)
continue; continue;
el = document.getElementById("tr_" + i); el = document.getElementById("tr_" + i);
@ -186,8 +179,6 @@ function updt_table(line) {
} }
else if (document.getElementById("bt_" + i + "_y").checked) { else if (document.getElementById("bt_" + i + "_y").checked) {
ty++; ty++;
if (bkp[i])
by++;
if (line && i != line) if (line && i != line)
continue; continue;
el = document.getElementById("tr_" + i); el = document.getElementById("tr_" + i);
@ -207,18 +198,6 @@ function updt_table(line) {
document.getElementById("cnt_u").innerText = tu; document.getElementById("cnt_u").innerText = tu;
document.getElementById("cnt_w").innerText = tw; document.getElementById("cnt_w").innerText = tw;
document.getElementById("cnt_y").innerText = ty; document.getElementById("cnt_y").innerText = ty;
document.getElementById("cnt_bn").innerText = bn;
document.getElementById("cnt_bu").innerText = bu;
document.getElementById("cnt_bw").innerText = bw;
document.getElementById("cnt_by").innerText = by;
document.getElementById("cnt_bt").innerText = bn + bu + bw + by;
document.getElementById("cnt_nbn").innerText = tn - bn;
document.getElementById("cnt_nbu").innerText = tu - bu;
document.getElementById("cnt_nbw").innerText = tw - bw;
document.getElementById("cnt_nby").innerText = ty - by;
document.getElementById("cnt_nbt").innerText = tn - bn + tu - bu + tw - bw + ty - by;
} }
function updt_output() { function updt_output() {
@ -257,47 +236,23 @@ function updt(line,value) {
updt_output(); updt_output();
} }
function show_only(b,n,u,w,y) {
document.getElementById("sh_b").checked = !!b;
document.getElementById("sh_n").checked = !!n;
document.getElementById("sh_u").checked = !!u;
document.getElementById("sh_w").checked = !!w;
document.getElementById("sh_y").checked = !!y;
document.getElementById("show_all").checked = true;
updt(0,"r");
}
// --> // -->
</script> </script>
</HEAD> </HEAD>
EOF EOF
echo "<BODY>" echo "<BODY>"
echo -n "<table cellpadding=3 cellspacing=5 style='font-size: 150%;'><tr><th align=left>Backported</th>"
echo -n "<td style='background-color:$BG_N'><a href='#' onclick='show_only(1,1,0,0,0);'> N: <span id='cnt_bn'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_U'><a href='#' onclick='show_only(1,0,1,0,0);'> U: <span id='cnt_bu'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_W'><a href='#' onclick='show_only(1,0,0,1,0);'> W: <span id='cnt_bw'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_Y'><a href='#' onclick='show_only(1,0,0,0,1);'> Y: <span id='cnt_by'>0</span> </a></td>"
echo -n "<td>total: <span id='cnt_bt'>0</span></td>"
echo "</tr><tr>"
echo -n "<th align=left>Not backported</th>"
echo -n "<td style='background-color:$BG_N'><a href='#' onclick='show_only(0,1,0,0,0);'> N: <span id='cnt_nbn'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_U'><a href='#' onclick='show_only(0,0,1,0,0);'> U: <span id='cnt_nbu'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_W'><a href='#' onclick='show_only(0,0,0,1,0);'> W: <span id='cnt_nbw'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_Y'><a href='#' onclick='show_only(0,0,0,0,1);'> Y: <span id='cnt_nby'>0</span> </a></td>"
echo -n "<td>total: <span id='cnt_nbt'>0</span></td>"
echo "</tr></table><P/>"
echo -n "<big><big>Show:" echo -n "<big><big>Show:"
echo -n " <span style='background-color:$BG_B'><input type='checkbox' onclick='updt_table(0);' id='sh_b' checked />B (${#bkp[*]})</span> " echo -n " <span style='background-color:$BG_B'><input type='checkbox' onclick='updt_table(0);' id='sh_b' checked />B (${#bkp[*]})</span> "
echo -n " <span style='background-color:$BG_N'><input type='checkbox' onclick='updt_table(0);' id='sh_n' checked />N (<span id='cnt_n'>0</span>)</span> " echo -n " <span style='background-color:$BG_N'><input type='checkbox' onclick='updt_table(0);' id='sh_n' checked />N (<span id='cnt_n'>0</span>)</span> "
echo -n " <span style='background-color:$BG_U'><input type='checkbox' onclick='updt_table(0);' id='sh_u' checked />U (<span id='cnt_u'>0</span>)</span> " echo -n " <span style='background-color:$BG_U'><input type='checkbox' onclick='updt_table(0);' id='sh_u' checked />U (<span id='cnt_u'>0</span>)</span> "
echo -n " <span style='background-color:$BG_W'><input type='checkbox' onclick='updt_table(0);' id='sh_w' checked />W (<span id='cnt_w'>0</span>)</span> " echo -n " <span style='background-color:$BG_W'><input type='checkbox' onclick='updt_table(0);' id='sh_w' checked />W (<span id='cnt_w'>0</span>)</span> "
echo -n " <span style='background-color:$BG_Y'><input type='checkbox' onclick='updt_table(0);' id='sh_y' checked />Y (<span id='cnt_y'>0</span>)</span> " echo -n " <span style='background-color:$BG_Y'><input type='checkbox' onclick='updt_table(0);' id='sh_y' checked />Y (<span id='cnt_y'>0</span>)</span> "
echo -n "</big/></big><br/>(B=show backported, N=no/drop, U=uncertain, W=wait/next, Y=yes/pick" echo -n "</big/></big> (B=show backported, N=no/drop, U=uncertain, W=wait/next, Y=yes/pick"
echo ")<P/>" echo ")<P/>"
echo "<TABLE COLS=5 BORDER=1 CELLSPACING=0 CELLPADDING=3>" echo "<TABLE COLS=5 BORDER=1 CELLSPACING=0 CELLPADDING=3>"
echo "<TR><TH>All<br/><input type='radio' name='review' id='show_all' onclick='updt(0,\"r\");' checked title='Start review here'/></TH><TH>CID</TH><TH>Subject</TH><TH>Verdict<BR>N U W Y</BR></TH><TH>Reason</TH></TR>" echo "<TR><TH>All<br/><input type='radio' name='review' onclick='updt(0,\"r\");' checked title='Start review here'/></TH><TH>CID</TH><TH>Subject</TH><TH>Verdict<BR>N U W Y</BR></TH><TH>Reason</TH></TR>"
seq_num=1; do_check=1; review=0; seq_num=1; do_check=1; review=0;
for patch in "${PATCHES[@]}"; do for patch in "${PATCHES[@]}"; do
# try to retrieve the patch's numbering (0001-9999) # try to retrieve the patch's numbering (0001-9999)
@ -380,7 +335,7 @@ for patch in "${PATCHES[@]}"; do
resp=$(echo "$resp" | sed -e "s|#\([0-9]\{1,5\}\)|<a href='${ISSUES}\1'>#\1</a>|g") resp=$(echo "$resp" | sed -e "s|#\([0-9]\{1,5\}\)|<a href='${ISSUES}\1'>#\1</a>|g")
# put links to commit IDs # put links to commit IDs
resp=$(echo "$resp" | sed -e "s|\([0-9a-f]\{7,40\}\)|<a href='${GITURL}\1'>\1</a>|g") resp=$(echo "$resp" | sed -e "s|\([0-9a-f]\{8,40\}\)|<a href='${GITURL}\1'>\1</a>|g")
echo -n "<TD nowrap align=center ${bkp[$cid]:+style='background-color:${BG_B}'}>$seq_num<BR/>" echo -n "<TD nowrap align=center ${bkp[$cid]:+style='background-color:${BG_B}'}>$seq_num<BR/>"
echo -n "<input type='radio' name='review' onclick='updt($seq_num,\"r\");' ${do_check:+checked} title='Start review here'/></TD>" echo -n "<input type='radio' name='review' onclick='updt($seq_num,\"r\");' ${do_check:+checked} title='Start review here'/></TD>"

View File

@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH" PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
(cd "$HAPROXY_DIR" (cd "$HAPROXY_DIR"
# avoid git pull, it chokes on forced push git pull
git remote update origin; git reset origin/master;git checkout -f
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1) last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
if [ -n "$last_file" ]; then if [ -n "$last_file" ]; then
restart=$(head -n1 "$last_file" | cut -f2 -d' ') restart=$(head -n1 "$last_file" | cut -f2 -d' ')

View File

@ -17,9 +17,9 @@
//const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504}; //const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504};
#define CODES 32 #define CODES 32
const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,414,421,422,425,429,431,500,501,502,503,504, const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504,
/* padding entries below, which will fall back to the default code */ /* padding entries below, which will fall back to the default code */
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
unsigned mul, xor; unsigned mul, xor;
unsigned bmul = 0, bxor = 0; unsigned bmul = 0, bxor = 0;

View File

@ -1,233 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <haproxy/connection-t.h>
#include <haproxy/intops.h>
/* Pairs a label with a table of 16 event-type name strings (indexed by a
 * 4-bit event code). NOTE(review): "loc" presumably names the event location
 * (fd, muxc, se, ...) matching the tables below -- confirm against callers.
 */
struct tevt_info {
	const char *loc;     // location label
	const char **types;  // 16-entry table of event-type names
};

/* will be sufficient for even largest flag names */
static char buf[4096];
static size_t bsz = sizeof(buf);
static const char *tevt_unknown_types[16] = {
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
[ 4] = "-", [ 5] = "-", [ 6] = "-", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_fd_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "conn_err",
[ 8] = "intercepted", [ 9] = "conn_poll_err", [10] = "poll_err", [11] = "poll_hup",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_hs_types[16] = {
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_xprt_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_muxc_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "tout",
[ 8] = "goaway_rcvd", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
[12] = "graceful_shut", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_se_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "eos", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "-",
[ 8] = "rst_rcvd", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
[12] = "cancelled", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_strm_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "eos", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "tout",
[ 8] = "intercepted", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
[12] = "aborted", [13] = "-", [14] = "-", [15] = "-",
};
static const struct tevt_info tevt_location[26] = {
[ 0] = {.loc = "-", .types = tevt_unknown_types}, [ 1] = {.loc = "-", .types = tevt_unknown_types},
[ 2] = {.loc = "-", .types = tevt_unknown_types}, [ 3] = {.loc = "-", .types = tevt_unknown_types},
[ 4] = {.loc = "se", .types = tevt_se_types}, [ 5] = {.loc = "fd", .types = tevt_fd_types},
[ 6] = {.loc = "-", .types = tevt_unknown_types}, [ 7] = {.loc = "hs", .types = tevt_hs_types},
[ 8] = {.loc = "-", .types = tevt_unknown_types}, [ 9] = {.loc = "-", .types = tevt_unknown_types},
[10] = {.loc = "-", .types = tevt_unknown_types}, [11] = {.loc = "-", .types = tevt_unknown_types},
[12] = {.loc = "muxc", .types = tevt_muxc_types}, [13] = {.loc = "-", .types = tevt_unknown_types},
[14] = {.loc = "-", .types = tevt_unknown_types}, [15] = {.loc = "-", .types = tevt_unknown_types},
[16] = {.loc = "-", .types = tevt_unknown_types}, [17] = {.loc = "-", .types = tevt_unknown_types},
[18] = {.loc = "strm", .types = tevt_strm_types}, [19] = {.loc = "-", .types = tevt_unknown_types},
[20] = {.loc = "-", .types = tevt_unknown_types}, [21] = {.loc = "-", .types = tevt_unknown_types},
[22] = {.loc = "-", .types = tevt_unknown_types}, [23] = {.loc = "xprt", .types = tevt_xprt_types},
[24] = {.loc = "-", .types = tevt_unknown_types}, [25] = {.loc = "-", .types = tevt_unknown_types},
};
/* Print the command-line usage message for the tool on stderr and exit the
 * program with status 1. <name> is the program name (argv[0]). Never returns.
 */
void usage_exit(const char *name)
{
	fprintf(stderr, "Usage: %s { value* | - }\n", name);
	exit(1);
}
/* Copy the NUL-terminated string <src> into <dst>, converting it to upper
 * case. <dst> must be large enough to hold <src> including the trailing NUL
 * (the caller passes a fixed-size local sized for the known location names).
 * Returns <dst>.
 */
char *to_upper(char *dst, const char *src)
{
	int i;

	for (i = 0; src[i]; i++) {
		/* cast needed: passing a negative plain char to toupper()
		 * is undefined behavior per the C standard.
		 */
		dst[i] = toupper((unsigned char)src[i]);
	}
	dst[i] = 0;
	return dst;
}
/* Decode the compact traffic-event string <value> (a sequence of 2-char
 * pairs: a location letter followed by a hex event-type digit) into a
 * human-readable list in <buf> of size <len>, with <delim> between events.
 * An uppercase location letter makes the location name uppercase in the
 * output. Special outputs: "##NONE" for an empty/NULL value, "##UNK" for
 * "-", "##INV" for a malformed string (odd length or unexpected chars).
 * Returns a pointer past what was written into <buf> on the normal path,
 * or <buf> itself on the early special cases above.
 */
char *tevt_show_events(char *buf, size_t len, const char *delim, const char *value)
{
	char loc[5];
	int ret;

	if (!value || !*value) {
		snprintf(buf, len, "##NONE");
		goto end;
	}

	if (strcmp(value, "-") == 0) {
		snprintf(buf, len, "##UNK");
		goto end;
	}

	if (strlen(value) % 2 != 0) {
		snprintf(buf, len, "##INV");
		goto end;
	}

	while (*value) {
		struct tevt_info info;
		char l = value[0];
		char t = value[1];

		/* unsigned char casts: ctype functions have undefined
		 * behavior on negative plain-char values.
		 */
		if (!isalpha((unsigned char)l) || !isxdigit((unsigned char)t)) {
			snprintf(buf, len, "##INV");
			goto end;
		}

		info = tevt_location[tolower((unsigned char)l) - 'a'];
		ret = snprintf(buf, len, "%s:%s%s",
			       isupper((unsigned char)l) ? to_upper(loc, info.loc) : info.loc,
			       info.types[hex2i(t)],
			       value[2] != 0 ? delim : "");

		/* stop on output error or truncation: snprintf() returns the
		 * length it *wanted* to write, so with ret >= len the old
		 * "len -= ret" would wrap the unsigned len around and let the
		 * next iteration write far past the end of <buf>.
		 */
		if (ret < 0 || (size_t)ret >= len)
			break;

		len -= ret;
		buf += ret;
		value += 2;
	}
 end:
	return buf;
}
/* Decode a tuple of event strings of the form "{v1,v2,...}" held in the
 * writable string <value>, printing one decoded element per line prefixed
 * with "\t- ". <buf>/<len> are used as scratch space for each element's
 * decoding. Each element is temporarily NUL-terminated in place and the
 * original character is restored afterwards, so <value> is left unchanged
 * on return. A tuple not properly closed by '}' prints a final "\t- ##INV"
 * line. <buf> is cleared before returning (so the caller's trailing printf
 * of the buffer prints nothing extra) and is returned.
 */
char *tevt_show_tuple_events(char *buf, size_t len, char *value)
{
	char *p = value;

	/* skip '{' */
	p++;
	while (*p) {
		char *v;
		char c;

		/* skip blanks before the element */
		while (*p == ' ' || *p == '\t')
			p++;

		v = p;
		/* advance to the end of the element: ',' or '}' or NUL */
		while (*p && *p != ',' && *p != '}')
			p++;

		/* temporarily terminate the element in place for decoding,
		 * then restore the delimiter once printed.
		 */
		c = *p;
		*p = 0;
		tevt_show_events(buf, len, " > ", v);
		printf("\t- %s\n", buf);
		*p = c;

		if (*p == ',')
			p++;
		else if (*p == '}')
			break;
		else {
			/* neither ',' nor '}': the tuple ended unterminated */
			printf("\t- ##INV\n");
			break;
		}
	}
	/* empty the shared buffer: output was already emitted line by line */
	*buf = 0;
	return buf;
}
/* Entry point: decodes each event string given on the command line, or, if
 * the single argument "-" is passed, each line read from stdin. With more
 * than one argument, every value is echoed back before its decoding. Tuple
 * values ("{...}") are printed one element per line. Always returns 0.
 */
int main(int argc, char **argv)
{
	const char *progname = argv[0];
	char line[128];
	char *input;
	char *end;
	int several;
	int from_stdin;

	if (argc == 1)
		usage_exit(progname);

	argc--; argv++;
	several = (argc > 1);
	from_stdin = (strcmp(argv[0], "-") == 0);

	while (argc > 0) {
		if (from_stdin) {
			input = fgets(line, sizeof(line), stdin);
			if (!input)
				break;

			/* skip common leading delimiters that slip from copy-paste */
			while (*input == ' ' || *input == '\t' || *input == ':' || *input == '=')
				input++;

			/* strip the trailing newline, if any */
			for (end = input; *end && *end != '\n'; end++)
				;
			*end = 0;
		} else {
			input = argv[0];
			argc--; argv++;
		}

		if (several)
			printf("### %-8s : ", input);

		if (*input == '{') {
			if (!from_stdin)
				printf("\n");
			tevt_show_tuple_events(buf, bsz, input);
		} else {
			tevt_show_events(buf, bsz, " > ", input);
		}
		printf("%s\n", buf);
	}
	return 0;
}

View File

@ -3,9 +3,7 @@ DeviceAtlas Device Detection
In order to add DeviceAtlas Device Detection support, you would need to download In order to add DeviceAtlas Device Detection support, you would need to download
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module. the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
Once extracted, two modes are supported : Once extracted :
1/ Build HAProxy and DeviceAtlas in one command
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
@ -16,6 +14,10 @@ directory. Also, in the case the api cache support is not needed and/or a C++ to
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1 $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
However, if the API had been installed beforehand, DEVICEATLAS_SRC
can be omitted. Note that the DeviceAtlas C API version supported is from the 3.x
releases series (3.2.1 minimum recommended).
For HAProxy developers who need to verify that their changes didn't accidentally For HAProxy developers who need to verify that their changes didn't accidentally
break the DeviceAtlas code, it is possible to build a dummy library provided in break the DeviceAtlas code, it is possible to build a dummy library provided in
the addons/deviceatlas/dummy directory and to use it as an alternative for the the addons/deviceatlas/dummy directory and to use it as an alternative for the
@ -25,29 +27,6 @@ validate API changes :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
2/ Build and install DeviceAtlas according to https://docs.deviceatlas.com/apis/enterprise/c/<release version>/README.html
For example :
In the deviceatlas library folder :
$ cmake .
$ make
$ sudo make install
In the HAProxy folder :
$ make TARGET=<target> USE_DEVICEATLAS=1
Note that if the -DCMAKE_INSTALL_PREFIX cmake option had been used, it is necessary to set as well DEVICEATLAS_LIB and
DEVICEATLAS_INC as follow :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=<CMAKE_INSTALL_PREFIX value>/include DEVICEATLAS_LIB=<CMAKE_INSTALL_PREFIX value>/lib
For example :
$ cmake -DCMAKE_INSTALL_PREFIX=/opt/local
$ make
$ sudo make install
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=/opt/local/include DEVICEATLAS_LIB=/opt/local/lib
Note that DEVICEATLAS_SRC is omitted in this case.
These are supported DeviceAtlas directives (see doc/configuration.txt) : These are supported DeviceAtlas directives (see doc/configuration.txt) :
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>. - deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
- deviceatlas-log-level <number> (0 to 3, level of information returned by - deviceatlas-log-level <number> (0 to 3, level of information returned by

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

View File

@ -1,12 +1,16 @@
----------------------------------------------- -----------------------------------------------
Stream Processing Offload Engine (SPOE) Stream Processing Offload Engine (SPOE)
Version 1.2 Version 1.2
( Last update: 2024-07-12 ) ( Last update: 2020-06-13 )
----------------------------------------------- -----------------------------------------------
Author : Christopher Faulet Author : Christopher Faulet
Contact : cfaulet at haproxy dot com Contact : cfaulet at haproxy dot com
WARNING: The SPOE is now deprecated and will be removed in future version.
SUMMARY SUMMARY
-------- --------
@ -69,10 +73,13 @@ systems (often at least the connect() is blocking). So, it is hard to properly
implement Single Sign On solution (SSO) in HAProxy. The SPOE will ease this implement Single Sign On solution (SSO) in HAProxy. The SPOE will ease this
kind of processing, or we hope so. kind of processing, or we hope so.
The aim of SPOE is to allow any kind of offloading on the streams. It can Now, the aim of SPOE is to allow any kind of offloading on the streams. First
offload the processing before "tcp-request content", "tcp-response content", releases won't do lot of things. As we will see, there are few handled events
"http-request" and "http-response" rules. It is also possible to offload the and even less actions supported. Actually, for now, the SPOE can offload the
processing via an TCP/HTTP rule. processing before "tcp-request content", "tcp-response content", "http-request"
and "http-response" rules. And it only supports variables definition. But, in
spite of these limited features, we can easily imagine to implement SSO
solution, ip reputation or ip geolocation services.
Some example implementations in various languages are linked to from the Some example implementations in various languages are linked to from the
HAProxy Wiki page dedicated to this mechanism: HAProxy Wiki page dedicated to this mechanism:
@ -82,8 +89,8 @@ HAProxy Wiki page dedicated to this mechanism:
2. SPOE configuration 2. SPOE configuration
---------------------- ----------------------
Because SPOE is implemented as a filter, To use it, a "filter spoe" line must Because SPOE is implemented as a filter, To use it, you must declare a "filter
be declared in a proxy section (frontend/backend/listen) : spoe" line in a proxy section (frontend/backend/listen) :
frontend my-front frontend my-front
... ...
@ -96,10 +103,9 @@ the SPOE configuration. So it is possible to use the same SPOE configuration
for several engines. If no name is provided, the SPOE configuration must not for several engines. If no name is provided, the SPOE configuration must not
contain any scope directive. contain any scope directive.
Using a separate configuration file makes possible to disable completely an We use a separate configuration file on purpose. By commenting SPOE filter
engine by only commenting the SPOE filter line, including the parsing of line, you completely disable the feature, including the parsing of sections
sections reserved to SPOE. This is also a way to keep the HAProxy configuration reserved to SPOE. This is also a way to keep the HAProxy configuration clean.
clean.
A SPOE configuration file must contains, at least, the SPOA configuration A SPOE configuration file must contains, at least, the SPOA configuration
("spoe-agent" section) and SPOE messages/groups ("spoe-message" or "spoe-group" ("spoe-agent" section) and SPOE messages/groups ("spoe-message" or "spoe-group"
@ -112,13 +118,12 @@ file.
2.1. SPOE scope 2.1. SPOE scope
------------------------- -------------------------
If an engine name is specified on the SPOE filter line, then the corresponding If you specify an engine name on the SPOE filter line, then you need to define
scope must be defined in the SPOE configuration with the same name. It is scope in the SPOE configuration with the same name. You can have several SPOE
possible to have several SPOE scopes in the same file. In each scope, one and scope in the same file. In each scope, you must define one and only one
only one "spoe-agent" section must be defined, to configure the SPOA linked to "spoe-agent" section to configure the SPOA linked to your SPOE and several
the defined engine and several "spoe-message" and "spoe-group" sections to "spoe-message" and "spoe-group" sections to describe, respectively, messages and
describe, respectively, messages and group of messages sent to servers managed group of messages sent to servers managed by your SPOA.
the SPOA.
A SPOE scope starts with this kind of line : A SPOE scope starts with this kind of line :
@ -147,15 +152,15 @@ If no engine name is provided on the SPOE filter line, no SPOE scope must be
found in the SPOE configuration file. All the file is considered to be in the found in the SPOE configuration file. All the file is considered to be in the
same anonymous and implicit scope. same anonymous and implicit scope.
The engine name must be unique for a proxy. If no engine name is provided on The engine name must be uniq for a proxy. If no engine name is provided on the
the SPOE filter line, the SPOE agent name is used by default. SPOE filter line, the SPOE agent name is used by default.
2.2. "spoe-agent" section 2.2. "spoe-agent" section
-------------------------- --------------------------
For each engine, exactly one "spoe-agent" section must be defined. Enabled SPOE For each engine, you must define one and only one "spoe-agent" section. In this
messages are declared in this section, and all the parameters (timeout, section, you will declare SPOE messages and the backend you will use. You will
options, ...) used to customize the agent behavior. also set timeouts and options to customize your agent's behaviour.
spoe-agent <name> spoe-agent <name>
@ -168,10 +173,15 @@ spoe-agent <name>
following keywords are supported : following keywords are supported :
- groups - groups
- log - log
- maxconnrate
- maxerrrate
- max-frame-size - max-frame-size
- max-waiting-frames
- messages - messages
- [no] option async
- [no] option dontlog-normal - [no] option dontlog-normal
- [no] option pipelining - [no] option pipelining
- [no] option send-frag-payload
- option continue-on-error - option continue-on-error
- option force-set-var - option force-set-var
- option set-on-error - option set-on-error
@ -179,16 +189,9 @@ spoe-agent <name>
- option set-total-time - option set-total-time
- option var-prefix - option var-prefix
- register-var-names - register-var-names
- timeout processing - timeout hello|idle|processing
- use-backend - use-backend
following keywords are deprecated and ignored:
- maxconnrate
- maxerrrate
- max-waiting-frames
- [no] option async
- [no] option send-frag-payload
- timeout hello|idle
groups <grp-name> ... groups <grp-name> ...
Declare the list of SPOE groups that an agent will handle. Declare the list of SPOE groups that an agent will handle.
@ -197,11 +200,11 @@ groups <grp-name> ...
<grp-name> is the name of a SPOE group. <grp-name> is the name of a SPOE group.
Groups declared here must be found in the same engine scope, else an error is Groups declared here must be found in the same engine scope, else an error is
triggered during the configuration parsing. Several "groups" lines can be triggered during the configuration parsing. You can have many "groups" lines.
defined.
See also: "spoe-group" section. See also: "spoe-group" section.
log global log global
log <address> [len <length>] [format <format>] <facility> [<level> [<minlevel>]] log <address> [len <length>] [format <format>] <facility> [<level> [<minlevel>]]
no log no log
@ -212,35 +215,28 @@ no log
See the HAProxy Configuration Manual for details about this option. See the HAProxy Configuration Manual for details about this option.
maxconnrate <number> [DEPRECATED] maxconnrate <number>
Set the maximum number of connections per second to <number>. The SPOE will Set the maximum number of connections per second to <number>. The SPOE will
stop to open new connections if the maximum is reached and will wait to stop to open new connections if the maximum is reached and will wait to
acquire an existing one. So it is important to set "timeout hello" to a acquire an existing one. So it is important to set "timeout hello" to a
relatively small value. relatively small value.
This parameter is now deprecated and ignored. It will be removed in future
versions.
maxerrrate <number> [DEPRECATED] maxerrrate <number>
Set the maximum number of errors per second to <number>. The SPOE will stop Set the maximum number of errors per second to <number>. The SPOE will stop
its processing if the maximum is reached. its processing if the maximum is reached.
This parameter is now deprecated and ignored. It will be removed in future
versions.
max-frame-size <number> max-frame-size <number>
Set the maximum allowed size for frames exchanged between HAProxy and SPOA. Set the maximum allowed size for frames exchanged between HAProxy and SPOA.
It must be in the range [256, tune.bufsize-4] (4 bytes are reserved for the It must be in the range [256, tune.bufsize-4] (4 bytes are reserved for the
frame length). By default, it is set to (tune.bufsize-4). frame length). By default, it is set to (tune.bufsize-4).
max-waiting-frames <number> [DEPRECATED] max-waiting-frames <number>
Set the maximum number of frames waiting for an acknowledgement on the same Set the maximum number of frames waiting for an acknowledgement on the same
connection. This value is only used when the pipelinied or asynchronous connection. This value is only used when the pipelinied or asynchronous
exchanges between HAProxy and SPOA are enabled. By default, it is set to 20. exchanges between HAProxy and SPOA are enabled. By default, it is set to 20.
This parameter is now deprecated and ignored. It will be removed in future
versions.
messages <msg-name> ... messages <msg-name> ...
Declare the list of SPOE messages that an agent will handle. Declare the list of SPOE messages that an agent will handle.
@ -248,24 +244,23 @@ messages <msg-name> ...
<msg-name> is the name of a SPOE message. <msg-name> is the name of a SPOE message.
Messages declared here must be found in the same engine scope, else an error Messages declared here must be found in the same engine scope, else an error
is triggered during the configuration parsing. Several "messages" lines can is triggered during the configuration parsing. You can have many "messages"
be defined. lines.
See also: "spoe-message" section. See also: "spoe-message" section.
option async [DEPRECATED]
option async
no option async no option async
Enable or disable the support of asynchronous exchanges between HAProxy and Enable or disable the support of asynchronous exchanges between HAProxy and
SPOA. By default, this option is enabled. SPOA. By default, this option is enabled.
This parameter is now deprecated and ignored. It will be removed in future
versions.
option continue-on-error option continue-on-error
Do not stop the events processing when an error occurred on a stream. Do not stop the events processing when an error occurred on a stream.
By default, for a specific stream, when an abnormal/unexpected error occurs, By default, for a specific stream, when an abnormal/unexpected error occurs,
the SPOE is disabled for all the transaction. if several events are the SPOE is disabled for all the transaction. So if you have several events
configured, such error on an event will disabled all following. For TCP configured, such error on an event will disabled all following. For TCP
streams, this will disable the SPOE for the whole session. For HTTP streams, streams, this will disable the SPOE for the whole session. For HTTP streams,
this will disable it for the transaction (request and response). this will disable it for the transaction (request and response).
@ -273,6 +268,7 @@ option continue-on-error
When set, this option bypass this behaviour and only the current event will When set, this option bypass this behaviour and only the current event will
be ignored. be ignored.
option dontlog-normal option dontlog-normal
no option dontlog-normal no option dontlog-normal
Enable or disable logging of normal, successful processing. Enable or disable logging of normal, successful processing.
@ -281,27 +277,29 @@ no option dontlog-normal
See also: "log" and section 4 about logging. See also: "log" and section 4 about logging.
option force-set-var option force-set-var
By default, SPOE filter only register already known variables (mainly from By default, SPOE filter only register already known variables (mainly from
parsing of the configuration), and process-wide variables (those of scope parsing of the configuration), and process-wide variables (those of scope
"proc") cannot be created. If HAProxy trusts the agent and registers all "proc") cannot be created. If you want that haproxy trusts the agent and
variables (ex: can be useful for LUA workload), this option can be sets. registers all variables (ex: can be useful for LUA workload), activate this
option.
Caution : this option opens to a variety of attacks such as a rogue SPOA that Caution : this option opens to a variety of attacks such as a rogue SPOA that
asks to register too many variables. asks to register too many variables.
option pipelining option pipelining
no option pipelining no option pipelining
Enable or disable the support of pipelined exchanges between HAProxy and Enable or disable the support of pipelined exchanges between HAProxy and
SPOA. By default, this option is enabled. SPOA. By default, this option is enabled.
option send-frag-payload [DEPRECATED]
option send-frag-payload
no option send-frag-payload no option send-frag-payload
Enable or disable the sending of fragmented payload to SPOA. By default, this Enable or disable the sending of fragmented payload to SPOA. By default, this
option is enabled. option is enabled.
This parameter is now deprecated and ignored. It will be removed in future
versions.
option set-on-error <var name> option set-on-error <var name>
Define the variable to set when an error occurred during an event processing. Define the variable to set when an error occurred during an event processing.
@ -313,13 +311,13 @@ option set-on-error <var name>
This variable will only be set when an error occurred in the scope of the This variable will only be set when an error occurred in the scope of the
transaction. As for all other variables define by the SPOE, it will be transaction. As for all other variables define by the SPOE, it will be
prefixed. So, if the variable name is "error" and the prefix is prefixed. So, if your variable name is "error" and your prefix is
"my_spoe_pfx", the variable will be "txn.my_spoe_pfx.error". "my_spoe_pfx", the variable will be "txn.my_spoe_pfx.error".
When set, the variable is an integer representing the error reason. For values When set, the variable is an integer representing the error reason. For values
under 256, it represents an error coming from the engine. Below 256, it under 256, it represents an error coming from the engine. Below 256, it
reports a SPOP error. In this case, to retrieve the right SPOP status code, reports a SPOP error. In this case, to retrieve the right SPOP status code,
256 must be removed from this value. Here are possible values: you must remove 256 to this value. Here are possible values:
* 1 a timeout occurred during the event processing. * 1 a timeout occurred during the event processing.
@ -353,8 +351,8 @@ option set-process-time <var name>
contain characters 'a-z', 'A-Z', '0-9', '.' and '_'. contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
This variable will be set in the scope of the transaction. As for all other This variable will be set in the scope of the transaction. As for all other
variables define by the SPOE, it will be prefixed. So, if the variable name variables define by the SPOE, it will be prefixed. So, if your variable name
is "process_time" and the prefix is "my_spoe_pfx", the variable will be is "process_time" and your prefix is "my_spoe_pfx", the variable will be
"txn.my_spoe_pfx.process_time". "txn.my_spoe_pfx.process_time".
When set, the variable is an integer representing the delay to process the When set, the variable is an integer representing the delay to process the
@ -362,10 +360,11 @@ option set-process-time <var name>
latency added by the SPOE processing for the last handled event or group. latency added by the SPOE processing for the last handled event or group.
If several events or groups are processed for the same stream, this value If several events or groups are processed for the same stream, this value
will be overridden. will be overrideen.
See also: "option set-total-time". See also: "option set-total-time".
option set-total-time <var name> option set-total-time <var name>
Define the variable to set to report the total processing time SPOE for a Define the variable to set to report the total processing time SPOE for a
stream. stream.
@ -376,8 +375,8 @@ option set-total-time <var name>
contain characters 'a-z', 'A-Z', '0-9', '.' and '_'. contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
This variable will be set in the scope of the transaction. As for all other This variable will be set in the scope of the transaction. As for all other
variables define by the SPOE, it will be prefixed. So, if the variable name variables define by the SPOE, it will be prefixed. So, if your variable name
is "total_time" and the prefix is "my_spoe_pfx", the variable will be is "total_time" and your prefix is "my_spoe_pfx", the variable will be
"txn.my_spoe_pfx.total_time". "txn.my_spoe_pfx.total_time".
When set, the variable is an integer representing the sum of processing times When set, the variable is an integer representing the sum of processing times
@ -389,6 +388,7 @@ option set-total-time <var name>
See also: "option set-process-time". See also: "option set-process-time".
option var-prefix <prefix> option var-prefix <prefix>
Define the prefix used when variables are set by an agent. Define the prefix used when variables are set by an agent.
@ -403,19 +403,19 @@ option var-prefix <prefix>
The prefix will be added between the variable scope and its name, separated The prefix will be added between the variable scope and its name, separated
by a '.'. It may only contain characters 'a-z', 'A-Z', '0-9', '.' and '_', as by a '.'. It may only contain characters 'a-z', 'A-Z', '0-9', '.' and '_', as
for variables name. In HAProxy configuration, this prefix must be used as a for variables name. In HAProxy configuration, you need to use this prefix as
part of the variables name. For example, if an agent define the variable a part of the variables name. For example, if an agent define the variable
"myvar" in the "txn" scope, with the prefix "my_spoe_pfx", then "myvar" in the "txn" scope, with the prefix "my_spoe_pfx", then you should
"txn.my_spoe_pfx.myvar" name must be used in HAProxy configuration. use "txn.my_spoe_pfx.myvar" name in your HAProxy configuration.
By default, an agent will never set new variables at runtime: It can only set By default, an agent will never set new variables at runtime: It can only set
new value for existing ones. To change this behaviour, see "force-set-var" new value for existing ones. If you want a different behaviour, see
option and "register-var-names" directive. force-set-var option and register-var-names directive.
register-var-names <var name> ... register-var-names <var name> ...
Register some variable names. By default, an agent will not be allowed to set Register some variable names. By default, an agent will not be allowed to set
new variables at runtime. This rule can be totally relaxed by setting the new variables at runtime. This rule can be totally relaxed by setting the
option "force-set-var". If all the required variables are known, this option "force-set-var". If you know all the variables you will need, this
directive is a good way to register them without letting an agent doing what directive is a good way to register them without letting an agent doing what
it want. This is only required if these variables are not referenced anywhere it want. This is only required if these variables are not referenced anywhere
in the HAProxy configuration or the SPOE one. in the HAProxy configuration or the SPOE one.
@ -424,12 +424,12 @@ register-var-names <var name> ...
<var name> is a variable name without the scope. The name may only <var name> is a variable name without the scope. The name may only
contain characters 'a-z', 'A-Z', '0-9', '.' and '_'. contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
The prefix will be automatically added during the registration. Several The prefix will be automatically added during the registration. You can have
"register-var-names" lines can be used. many "register-var-names" lines.
See also: "option force-set-var", "option var-prefix". See also: "option force-set-var", "option var-prefix".
timeout hello <timeout> [DEPRECATED] timeout hello <timeout>
Set the maximum time to wait for an agent to receive the AGENT-HELLO frame. Set the maximum time to wait for an agent to receive the AGENT-HELLO frame.
It is applied on the stream that handle the connection with the agent. It is applied on the stream that handle the connection with the agent.
@ -441,10 +441,8 @@ timeout hello <timeout> [DEPRECATED]
This timeout is an applicative timeout. It differ from "timeout connect" This timeout is an applicative timeout. It differ from "timeout connect"
defined on backends. defined on backends.
This parameter is now deprecated and ignored. It will be removed in future
versions.
timeout idle <timeout> [DEPRECATED] timeout idle <timeout>
Set the maximum time to wait for an agent to close an idle connection. It is Set the maximum time to wait for an agent to close an idle connection. It is
applied on the stream that handle the connection with the agent. applied on the stream that handle the connection with the agent.
@ -453,8 +451,6 @@ timeout idle <timeout> [DEPRECATED]
can be in any other unit if the number is suffixed by the unit, can be in any other unit if the number is suffixed by the unit,
as explained at the top of this document. as explained at the top of this document.
This parameter is now deprecated and ignored. It will be removed in future
versions.
timeout processing <timeout> timeout processing <timeout>
Set the maximum time to wait for a stream to process an event, i.e to acquire Set the maximum time to wait for a stream to process an event, i.e to acquire
@ -490,19 +486,21 @@ spoe-message <name>
Arguments : Arguments :
<name> is the name of the SPOE message. <name> is the name of the SPOE message.
Here a message that can be referenced in a "spoe-agent" section is Here you define a message that can be referenced in a "spoe-agent"
defined. Following keywords are supported : section. Following keywords are supported :
- acl - acl
- args - args
- event - event
See also: "spoe-agent" section. See also: "spoe-agent" section.
acl <aclname> <criterion> [flags] [operator] <value> ... acl <aclname> <criterion> [flags] [operator] <value> ...
Declare or complete an access list. Declare or complete an access list.
See section 7 about ACL usage in the HAProxy Configuration Manual. See section 7 about ACL usage in the HAProxy Configuration Manual.
args [name=]<sample> ... args [name=]<sample> ...
Define arguments passed into the SPOE message. Define arguments passed into the SPOE message.
@ -516,6 +514,7 @@ args [name=]<sample> ...
For example: For example:
args frontend=fe_id src dst args frontend=fe_id src dst
event <name> [ { if | unless } <condition> ] event <name> [ { if | unless } <condition> ]
Set the event that triggers sending of the message. It may optionally be Set the event that triggers sending of the message. It may optionally be
followed by an ACL-based condition, in which case it will only be evaluated followed by an ACL-based condition, in which case it will only be evaluated
@ -557,12 +556,13 @@ spoe-group <name>
Arguments : Arguments :
<name> is the name of the SPOE group. <name> is the name of the SPOE group.
Here a group of SPOE messages is defined. It can be referenced in a Here you define a group of SPOE messages that can be referenced in a
"spoe-agent" section. Following keywords are supported : "spoe-agent" section. Following keywords are supported :
- messages - messages
See also: "spoe-agent" and "spoe-message" sections. See also: "spoe-agent" and "spoe-message" sections.
messages <msg-name> ... messages <msg-name> ...
Declare the list of SPOE messages belonging to the group. Declare the list of SPOE messages belonging to the group.
@ -571,7 +571,7 @@ messages <msg-name> ...
Messages declared here must be found in the same engine scope, else an error Messages declared here must be found in the same engine scope, else an error
is triggered during the configuration parsing. Furthermore, a message belongs is triggered during the configuration parsing. Furthermore, a message belongs
at most to a group. Several "messages" lines can be defined. at most to a group. You can have many "messages" lines.
See also: "spoe-message" section. See also: "spoe-message" section.
@ -602,7 +602,7 @@ and 0 a blacklisted IP with no doubt).
server http A.B.C.D:80 server http A.B.C.D:80
backend iprep-servers backend iprep-servers
mode spop mode tcp
balance roundrobin balance roundrobin
timeout connect 5s # greater than hello timeout timeout connect 5s # greater than hello timeout
@ -620,6 +620,8 @@ and 0 a blacklisted IP with no doubt).
option var-prefix iprep option var-prefix iprep
timeout hello 2s
timeout idle 2m
timeout processing 10ms timeout processing 10ms
use-backend iprep-servers use-backend iprep-servers
@ -716,37 +718,62 @@ actions.
+---+---+----------+ +---+---+----------+
FIN: Indicates that this is the final payload fragment. The first fragment FIN: Indicates that this is the final payload fragment. The first fragment
may also be the final fragment. The payload fragmentation was removed may also be the final fragment.
and is now deprecated. It means the FIN flag must be set on all
frames.
ABORT: Indicates that the processing of the current frame must be ABORT: Indicates that the processing of the current frame must be
cancelled. cancelled. This bit should be set on frames with a fragmented
payload. It can be ignore for frames with an unfragemnted
payload. When it is set, the FIN bit must also be set.
Frames cannot exceed a maximum size negotiated between HAProxy and agents Frames cannot exceed a maximum size negotiated between HAProxy and agents
during the HELLO handshake. Most of time, payload will be small enough to send during the HELLO handshake. Most of time, payload will be small enough to send
it in one frame. it in one frame. But when supported by the peer, it will be possible to
fragment huge payload on many frames. This ability is announced during the
HELLO handshake and it can be asynmetric (supported by agents but not by
HAProxy or the opposite). The following rules apply to fragmentation:
* An unfragemnted payload consists of a single frame with the FIN bit set.
* A fragemented payload consists of several frames with the FIN bit clear and
terminated by a single frame with the FIN bit set. All these frames must
share the same STREAM-ID and FRAME-ID. The first frame must set the right
FRAME-TYPE (e.g, NOTIFY). The following frames must have an unset type (0).
Beside the support of fragmented payload by a peer, some payload must not be
fragmented. See below for details.
IMPORTANT : The maximum size supported by peers for a frame must be greater IMPORTANT : The maximum size supported by peers for a frame must be greater
than or equal to 256 bytes. A good common value is the HAProxy than or equal to 256 bytes.
buffer size minus 4 bytes, reserved for the frame length
(tune.bufsize - 4). It is the default value announced by HAproxy.
3.2.1. Frame capabilities 3.2.1. Frame capabilities
-------------------------- --------------------------
Here are the list of official capabilities that HAProxy and agents can support: Here are the list of official capabilities that HAProxy and agents can support:
* fragmentation: This is the ability for a peer to support fragmented
payload in received frames. This is an asymmectical
capability, it only concerns the peer that announces
it. This is the responsibility to the other peer to use it
or not.
* pipelining: This is the ability for a peer to decouple NOTIFY and ACK * pipelining: This is the ability for a peer to decouple NOTIFY and ACK
frames. This is a symmectical capability. To be used, it must frames. This is a symmectical capability. To be used, it must
be supported by HAProxy and agents. Unlike HTTP pipelining, the be supported by HAProxy and agents. Unlike HTTP pipelining, the
ACK frames can be send in any order, but always on the same TCP ACK frames can be send in any order, but always on the same TCP
connection used for the corresponding NOTIFY frame. connection used for the corresponding NOTIFY frame.
* async: This ability is similar to the pipelining, but here any TCP
connection established between HAProxy and the agent can be used to
send ACK frames. if an agent accepts connections from multiple
HAProxy, it can use the "engine-id" value to group TCP
connections. See details about HAPROXY-HELLO frame.
Unsupported or unknown capabilities are silently ignored, when possible. Unsupported or unknown capabilities are silently ignored, when possible.
NOTE: Fragmentation and async capabilities were deprecated and are now ignored. NOTE: HAProxy does not support the fragmentation for now. This means it is not
able to handle fragmented frames. However, if an agent announces the
fragmentation support, HAProxy may choose to send fragemented frames.
3.2.2. Frame types overview 3.2.2. Frame types overview
---------------------------- ----------------------------
@ -755,6 +782,9 @@ Here are types of frame supported by SPOE. Frames sent by HAProxy come first,
then frames sent by agents : then frames sent by agents :
TYPE | ID | DESCRIPTION TYPE | ID | DESCRIPTION
-----------------------------+-----+-------------------------------------
UNSET | 0 | Used for all frames but the first when a
| | payload is fragmented.
-----------------------------+-----+------------------------------------- -----------------------------+-----+-------------------------------------
HAPROXY-HELLO | 1 | Sent by HAProxy when it opens a HAPROXY-HELLO | 1 | Sent by HAProxy when it opens a
| | connection on an agent. | | connection on an agent.
@ -775,8 +805,7 @@ then frames sent by agents :
ACK | 103 | Sent to acknowledge a NOTIFY frame ACK | 103 | Sent to acknowledge a NOTIFY frame
-----------------------------+-----+------------------------------------- -----------------------------+-----+-------------------------------------
Unknown frames may be silently skipped or trigger an error, depending on the Unknown frames may be silently skipped.
implementation.
3.2.3. Workflow 3.2.3. Workflow
---------------- ----------------
@ -840,6 +869,37 @@ implementation.
| <-------------------------- | | <-------------------------- |
| | | |
* Notify / Ack exchange (fragmented payload):
HAPROXY AGENT SRV
| NOTIFY (frag 1) |
| --------------------------> |
| |
| UNSET (frag 2) |
| --------------------------> |
| ... |
| UNSET (frag N) |
| --------------------------> |
| |
| ACK |
| <-------------------------- |
| |
* Aborted fragmentation of a NOTIFY frame:
HAPROXY AGENT SRV
| ... |
| UNSET (frag X) |
| --------------------------> |
| |
| ACK/ABORT |
| <-------------------------- |
| |
| UNSET (frag X+1) |
| -----------X |
| |
| |
* Connection closed by haproxy: * Connection closed by haproxy:
HAPROXY AGENT SRV HAPROXY AGENT SRV
@ -861,8 +921,8 @@ implementation.
---------------------------- ----------------------------
This frame is the first one exchanged between HAProxy and an agent, when the This frame is the first one exchanged between HAProxy and an agent, when the
connection is established. The payload of this frame is a KV-LIST. STREAM-ID connection is established. The payload of this frame is a KV-LIST. It cannot be
and FRAME-ID are must be set 0. fragmented. STREAM-ID and FRAME-ID are must be set 0.
Following items are mandatory in the KV-LIST: Following items are mandatory in the KV-LIST:
@ -907,7 +967,7 @@ AGENT-DISCONNECT frame must be returned.
This frame is sent in reply to a HAPROXY-HELLO frame to finish a HELLO This frame is sent in reply to a HAPROXY-HELLO frame to finish a HELLO
handshake. As for HAPROXY-HELLO frame, STREAM-ID and FRAME-ID are also set handshake. As for HAPROXY-HELLO frame, STREAM-ID and FRAME-ID are also set
0. The payload of this frame is a KV-LIST. 0. The payload of this frame is a KV-LIST and it cannot be fragmented.
Following items are mandatory in the KV-LIST: Following items are mandatory in the KV-LIST:
@ -941,7 +1001,8 @@ will close the connection at the end of the health check.
Information are sent to the agents inside NOTIFY frames. These frames are Information are sent to the agents inside NOTIFY frames. These frames are
attached to a stream, so STREAM-ID and FRAME-ID must be set. The payload of attached to a stream, so STREAM-ID and FRAME-ID must be set. The payload of
NOTIFY frames is a LIST-OF-MESSAGES. NOTIFY frames is a LIST-OF-MESSAGES and, if supported by agents, it can be
fragmented.
NOTIFY frames must be acknowledge by agents sending an ACK frame, repeating NOTIFY frames must be acknowledge by agents sending an ACK frame, repeating
right STREAM-ID and FRAME-ID. right STREAM-ID and FRAME-ID.
@ -951,7 +1012,8 @@ right STREAM-ID and FRAME-ID.
ACK frames must be sent by agents to reply to NOTIFY frames. STREAM-ID and ACK frames must be sent by agents to reply to NOTIFY frames. STREAM-ID and
FRAME-ID found in a NOTIFY frame must be reuse in the corresponding ACK FRAME-ID found in a NOTIFY frame must be reuse in the corresponding ACK
frame. The payload of ACK frames is a LIST-OF-ACTIONS. frame. The payload of ACK frames is a LIST-OF-ACTIONS and, if supported by
HAProxy, it can be fragmented.
3.2.8. Frame: HAPROXY-DISCONNECT 3.2.8. Frame: HAPROXY-DISCONNECT
--------------------------------- ---------------------------------
@ -961,8 +1023,8 @@ frame is sent with information describing the error. HAProxy will wait an
AGENT-DISCONNECT frame in reply. All other frames will be ignored. The agent AGENT-DISCONNECT frame in reply. All other frames will be ignored. The agent
must then close the socket. must then close the socket.
The payload of this frame is a KV-LIST. STREAM-ID and FRAME-ID are must be set The payload of this frame is a KV-LIST. It cannot be fragmented. STREAM-ID and
0. FRAME-ID are must be set 0.
Following items are mandatory in the KV-LIST: Following items are mandatory in the KV-LIST:
@ -984,8 +1046,8 @@ is sent, with information describing the error. such frame is also sent in reply
to a HAPROXY-DISCONNECT. The agent must close the socket just after sending to a HAPROXY-DISCONNECT. The agent must close the socket just after sending
this frame. this frame.
The payload of this frame is a KV-LIST. STREAM-ID and FRAME-ID are must be set The payload of this frame is a KV-LIST. It cannot be fragmented. STREAM-ID and
0. FRAME-ID are must be set 0.
Following items are mandatory in the KV-LIST: Following items are mandatory in the KV-LIST:
@ -1002,10 +1064,10 @@ For more information about known errors, see section "Errors & timeouts"
3.3. Events & Messages 3.3. Events & Messages
----------------------- -----------------------
Information about streams are sent in NOTIFY frames. It is possible to specify Information about streams are sent in NOTIFY frames. You can specify which kind
which kind of information to send by defining "spoe-message" sections in the of information to send by defining "spoe-message" sections in your SPOE
SPOE configuration file. for each "spoe-message" there will be a message in a configuration file. for each "spoe-message" there will be a message in a NOTIFY
NOTIFY frame when the right event is triggered. frame when the right event is triggered.
A NOTIFY frame is sent for an specific event when there is at least one A NOTIFY frame is sent for an specific event when there is at least one
"spoe-message" attached to this event. All messages for an event will be added "spoe-message" attached to this event. All messages for an event will be added
@ -1127,15 +1189,21 @@ An agent can define its own errors using a not yet assigned status code.
IMPORTANT NOTE: By default, for a specific stream, when an abnormal/unexpected IMPORTANT NOTE: By default, for a specific stream, when an abnormal/unexpected
error occurs, the SPOE is disabled for all the transaction. So error occurs, the SPOE is disabled for all the transaction. So
if several events are configured, such error on an event will if you have several events configured, such error on an event
disabled all following. For TCP streams, this will disable the will disabled all following. For TCP streams, this will
SPOE for the whole session. For HTTP streams, this will disable disable the SPOE for the whole session. For HTTP streams, this
it for the transaction (request and response). See 'option will disable it for the transaction (request and response).
continue-on-error' to bypass this limitation. See 'option continue-on-error' to bypass this limitation.
To avoid a stream to wait undefinetly, A processing timeout should be carefully To avoid a stream to wait undefinetly, you must carefully choose the
defined. Most of time, it will be quiet low. But it depends on the SPOA acknowledgement timeout. In most of cases, it will be quiet low. But it depends
responsivness. on the responsivness of your service.
You must also choose idle timeout carefully. Because connection with your
service depends on the backend configuration used by the SPOA, it is important
to use a lower value for idle timeout than the server timeout. Else the
connection will be closed by HAProxy. The same is true for hello timeout. You
should choose a lower value than the connect timeout.
4. Logging 4. Logging
----------- -----------
@ -1150,19 +1218,40 @@ LOG_NOTICE. Otherwise, the message is logged with the level LOG_WARNING.
The messages are logged using the agent's logger, if defined, and use the The messages are logged using the agent's logger, if defined, and use the
following format: following format:
SPOE: [AGENT] <TYPE:NAME> sid=STREAM-ID st=STATUS-CODE pT <nb_error>/<nb_processed> SPOE: [AGENT] <TYPE:NAME> sid=STREAM-ID st=STATUS-CODE reqT/qT/wT/resT/pT \
<idles>/<applets> <nb_sending>/<nb_waiting> <nb_error>/<nb_processed>
AGENT is the agent name AGENT is the agent name
TYPE is EVENT of GROUP TYPE is EVENT of GROUP
NAME is the event or the group name NAME is the event or the group name
STREAM-ID is an integer, the unique id of the stream STREAM-ID is an integer, the unique id of the stream
STATUS_CODE is the processing's status code STATUS_CODE is the processing's status code
pT is the delay to process the event or the group. reqT/qT/wT/resT/pT are the following time events:
From the stream point of view, it is the latency added
by the SPOE processing. * reqT : the encoding time. It includes ACLs processing, if any. For
fragmented frames, it is the sum of all fragments.
* qT : the delay before the request gets out the sending queue. For
fragmented frames, it is the sum of all fragments.
* wT : the delay before the response is received. No fragmentation
supported here.
* resT : the delay to process the response. No fragmentation supported
here.
* pT : the delay to process the event or the group. From the stream
point of view, it is the latency added by the SPOE processing.
It is more or less the sum of values above.
<idle> is the numbers of idle SPOE applets
<applets> is the numbers of SPOE applets
<nb_sending> is the numbers of streams waiting to send data
<nb_waiting> is the numbers of streams waiting for a ack
<nb_error> is the numbers of processing errors <nb_error> is the numbers of processing errors
<nb_processed> is the numbers of events/groups processed <nb_processed> is the numbers of events/groups processed
For all these time events, -1 means the processing was interrupted before the
end. So -1 for the queue time means the request was never dequeued. For
fragmented frames it is harder to know when the interruption happened.
/* /*
* Local variables: * Local variables:
* fill-column: 79 * fill-column: 79

1448
doc/architecture.txt Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,114 +0,0 @@
2024-10-28 - error reporting
----------------------------
- rules:
-> stream->current_rule ~= yielding rule or error
pb: not always set.
-> todo: curr_rule_in_progress points to &rule->conf (file+line)
- set on ACT_RET_ERR, ACT_RET_YIELD, ACT_RET_INV.
- sample_fetch: curr_rule
- filters:
-> strm_flt.filters[2] (1 per direction) ~= yielding filter or error
-> to check: what to do on forward filters (e.g. compression)
-> check spoe / waf (stream data)
-> sample_fetch: curr_filt
- cleanup:
- last_rule_line + last_rule_file can point to &rule->conf
- xprt:
- all handshakes use the dummy xprt "xprt_handshake" ("HS"). No data
exchange is possible there. The ctx is of type xprt_handshake_ctx
for all of them, and contains a wait_event.
=> conn->xprt_ctx->wait_event contains the sub for current handshake
*if* xprt points to xprt_handshake.
- at most 2 active xprt at once: top and bottom (bottom=raw_sock)
- proposal:
- combine 2 bits for muxc, 2 bits for xprt, 4 bits for fd (active,ready).
=> 8 bits for muxc and below. QUIC uses something different TBD.
- muxs uses 6 bits max (ex: h2 send_list, fctl_list, full etc; h1: full,
blocked connect...).
- 2 bits for sc's sub
- mux_sctl to retrieve a 32-bit code padded right, limited to 16 bits
for now.
=> [ 0000 | 0000 | 0000 | 0000 | SC | MUXS | MUXC | XPRT | FD ]
2 6 2 2 4
- sample-fetch for each side.
- shut / abort
- history, almost human-readable.
- event locations:
- fd (detected by rawsock)
- handshake (detected by xprt_handshake). Eg. parsing or address encoding
- xprt (ssl)
- muxc
- se: muxs / applet
- stream
< 8 total. +8 to distinguish front from back at stream level.
suggest:
- F, H, X, M, E, S front or back
- f, h, x, m, e, s back or front
- event types:
- 0 = no event yet
- 1 = timeout
- 2 = intercepted (rule, etc)
- 3 unused
// shutr / shutw: +1 if other side already shut
- 4 = aligned shutr
- 6 = aligned recv error
- 8 = early shutr (truncation)
- 10 = early error (truncation)
- 12 = shutw
- 14 = send error
- event location = MSB
event type = LSB
appending a single event:
-- if code not full --
code <<= 8;
code |= location << 4;
code |= event type;
- up to 4 events per connection in 32-bit mode stored on connection
(since raw_sock & ssl_sock need to access it).
- SE (muxs/applet) store their event log in the SD: se_event_log (64 bits).
- muxs must aggregate the connection's flags with its own:
- store last known connection state in SD: conn_event_log
- detect changes at the connection level by comparing with SD conn_event_log
- create a new SD event with difference(s) into SD se_event_log
- update connection state in SD conn_event_log
- stream
- store their event log in the stream: strm_event_log (64 bits).
- for each side:
- store last known SE state in SD: last_se_event_log
- detect changes at the SE level by comparing with SD se_event_log
- create a new STREAM event with difference(s) into STREAM strm_event_log
and patch the location depending on front vs back (+8 for back).
- update SE state in SD last_se_event_log
=> strm_event_log contains a composite of each side + stream.
- converted to string using the location letters
- if more event types needed later, can enlarge bits and use another letter.
- note: also possible to create an exhaustive enumeration of all possible codes
(types+locations).
- sample fetch to retrieve strm_event_log.
- Note that fc_err and fc_err_str are already usable
- questions:
- htx layer needed ?
- ability to map EOI/EOS etc to SE activity ?
- we'd like to detect an HTTP response before end of POST.

View File

@ -1,750 +0,0 @@
#FIG 3.2 Produced by xfig version 3.1
Landscape
Center
Metric
A4
100.00
Single
-2
1200 2
0 32 #8e8e8e
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
450 450 450 6750
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 547 2250 637
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 592 2250 682
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 637 2250 727
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 682 2250 772
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 900 2250 990
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 945 2250 1035
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 990 2250 1080
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1035 2250 1125
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1080 2250 1170
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1125 2250 1215
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1168 2250 1258
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1213 2250 1303
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1429 2250 1519
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1384 2250 1474
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1339 2250 1429
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
450 1303 2250 1393
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
448 1253 2248 1343
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
2251 794 451 884
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
2250 450 2250 6750
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
2251 1130 451 1220
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
2251 1309 451 1399
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
2295 810 2475 810
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
2295 1305 2475 1305
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
10800 450 10800 7155
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
9000 450 9000 7155
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 547 10800 1440
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 592 10800 1485
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 637 10800 1530
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 682 10800 1575
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2437 10800 3330
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2482 10800 3375
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2527 10800 3420
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2572 10800 3465
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2617 10800 3510
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2707 10800 3600
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2752 10800 3645
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 2662 10800 3555
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4327 10800 5220
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4372 10800 5265
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4462 10800 5355
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4417 10800 5310
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4507 10800 5400
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4552 10800 5445
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4597 10800 5490
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4642 10800 5535
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 5334 9001 6189
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 5532 9001 6387
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 3629 9001 4484
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 3476 9001 4331
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 1575 9001 2430
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
10845 1575 11610 1575
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
10845 3645 11565 3645
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
10845 6120 11610 6120
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 1487 10948 1366 10948 1456 11173 1276
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 1741 10948 1620 10948 1710 11173 1530
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 3406 10948 3285 10948 3375 11173 3195
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 3681 10948 3560 10948 3650 11173 3470
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 3996 10948 3875 10948 3965 11173 3785
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 4266 10948 4145 10948 4235 11173 4055
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 5278 10948 5157 10948 5247 11173 5067
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 5537 10948 5416 10948 5506 11173 5326
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 5002 10800 5895
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 5047 10800 5940
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 5092 10800 5985
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 5137 10800 6030
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 5182 10800 6075
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 5227 10800 6120
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6802 10800 7695
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6847 10800 7740
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6892 10800 7785
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6982 10800 7875
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7027 10800 7920
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7072 10800 7965
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6937 10800 7830
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7117 10800 8010
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7162 10800 8055
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 6129 9001 6984
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 5942 9001 6797
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4950 10800 5843
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 4905 10800 5798
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
3150 450 3150 6750
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
4905 450 4905 6750
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 592 4950 1485
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 637 4950 1530
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 547 4950 1440
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 682 4950 1575
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2572 4950 3465
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2527 4950 3420
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2482 4950 3375
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2437 4950 3330
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2617 4950 3510
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2662 4950 3555
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2707 4950 3600
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 2752 4950 3645
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4552 4950 5445
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4597 4950 5490
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4642 4950 5535
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4687 4950 5580
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4867 4950 5760
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4912 4950 5805
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 5047 4950 5940
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 5092 4950 5985
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4822 4950 5715
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4777 4950 5670
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4732 4950 5625
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 4957 4950 5850
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 5002 4950 5895
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 5137 4950 6030
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 5227 4950 6120
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
3150 5182 4950 6075
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
4951 1575 3151 2430
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
4951 3673 3151 4528
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
4995 1575 5175 1575
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
4995 3645 5175 3645
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
4995 6120 5175 6120
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
7650 450 7650 7155
2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
5850 450 5850 7155
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 547 7650 1440
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 592 7650 1485
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 637 7650 1530
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 682 7650 1575
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2437 7650 3330
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2482 7650 3375
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2527 7650 3420
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2572 7650 3465
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2617 7650 3510
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2707 7650 3600
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2752 7650 3645
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 2662 7650 3555
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4327 7650 5220
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4372 7650 5265
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4462 7650 5355
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4417 7650 5310
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4507 7650 5400
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4552 7650 5445
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4597 7650 5490
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4642 7650 5535
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4687 7650 5580
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4732 7650 5625
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4777 7650 5670
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4822 7650 5715
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4867 7650 5760
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4912 7650 5805
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 4957 7650 5850
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 5002 7650 5895
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6213 7650 7106
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6262 7650 7155
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6307 7650 7200
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6352 7650 7245
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6397 7650 7290
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6487 7650 7380
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6532 7650 7425
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6577 7650 7470
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6442 7650 7335
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6622 7650 7515
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6667 7650 7560
2 1 0 2 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6757 7650 7650
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6802 7650 7695
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6847 7650 7740
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6712 7650 7605
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6892 7650 7785
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
5850 6937 7650 7830
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
7651 5334 5851 6189
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
7651 5532 5851 6387
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
7651 5698 5851 6553
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
7651 5917 5851 6772
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
7651 3629 5851 4484
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
7651 3476 5851 4331
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
7651 1575 5851 2430
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
7695 1575 8460 1575
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
7695 3645 8415 3645
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 1 2
10 1 1.00 60.00 120.00
7695 6120 8460 6120
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 1487 7798 1366 7798 1456 8023 1276
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 1741 7798 1620 7798 1710 8023 1530
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 3406 7798 3285 7798 3375 8023 3195
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 3681 7798 3560 7798 3650 8023 3470
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 3996 7798 3875 7798 3965 8023 3785
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 4266 7798 4145 7798 4235 8023 4055
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 5278 7798 5157 7798 5247 8023 5067
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 5537 7798 5416 7798 5506 8023 5326
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 0 4
8955 4680 8910 4680 8910 4860 8955 4860
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 0 0 4
8955 6570 8910 6570 8910 6750 8955 6750
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 5791 10948 5670 10948 5760 11173 5580
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 6060 10948 5939 10948 6029 11173 5849
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 6372 10948 6251 10948 6341 11173 6161
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 6601 10948 6480 10948 6570 11173 6390
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 6781 10948 6660 10948 6750 11173 6570
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
10813 6970 10948 6849 10948 6939 11173 6759
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 5791 7798 5670 7798 5760 8023 5580
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 6060 7798 5939 7798 6029 8023 5849
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 6372 7798 6251 7798 6341 8023 6161
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 6601 7798 6480 7798 6570 8023 6390
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 6781 7798 6660 7798 6750 8023 6570
2 1 0 1 5 7 54 -1 -1 0.000 0 0 -1 1 0 4
1 1 1.00 60.00 120.00
7663 6970 7798 6849 7798 6939 8023 6759
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 7245 9001 8100
2 1 0 1 12 7 52 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
10801 7425 9001 8280
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7920 10800 8813
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7965 10800 8858
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 8010 10800 8903
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 8055 10800 8948
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 8100 10800 8993
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 8145 10800 9038
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 8190 10800 9083
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 8235 10800 9128
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7560 10800 8453
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7605 10800 8498
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7650 10800 8543
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7695 10800 8588
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7740 10800 8633
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7785 10800 8678
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7830 10800 8723
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7875 10800 8768
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7200 10800 8093
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7245 10800 8138
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7290 10800 8183
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7335 10800 8228
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7380 10800 8273
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7425 10800 8318
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7470 10800 8363
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 7515 10800 8408
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6210 10800 7103
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6255 10800 7148
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6300 10800 7193
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6345 10800 7238
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6390 10800 7283
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6435 10800 7328
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6480 10800 7373
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 6525 10800 7418
2 1 0 1 4 7 53 -1 -1 0.000 0 0 -1 1 0 2
2 1 1.00 60.00 120.00
8190 8280 8955 8280
2 1 0 1 1 7 51 -1 -1 0.000 0 0 -1 1 0 2
1 1 1.00 60.00 120.00
9000 8282 10800 9175
3 0 0 1 4 7 53 -1 -1 0.000 0 1 0 5
1 1 1.00 60.00 120.00
8910 4905 8820 5310 8775 5805 8865 6345 8910 6525
0.000 1.000 1.000 1.000 0.000
4 0 0 53 -1 16 6 0.0000 4 105 495 2520 1350 WP1 @12\001
4 0 0 53 -1 16 6 0.0000 4 105 435 2565 855 WP0 @4\001
4 0 0 53 -1 16 6 0.0000 4 75 390 2565 1005 => +8\001
4 1 0 52 -1 16 8 0.4363 4 105 765 9945 4050 WU: win=16\001
4 1 0 52 -1 16 8 0.4363 4 105 690 9945 1935 WU: win=8\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 1305 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 1485 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 3195 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 3465 -2 = 4\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 3825 -2 = 2\001
4 0 20 54 -1 18 6 0.0000 4 75 270 11205 4095 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 5085 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 5355 -2 = 4\001
4 0 0 53 -1 16 6 0.0000 4 105 495 11340 3645 WP1 @12\001
4 0 0 53 -1 16 6 0.0000 4 105 495 11295 6075 WP2 @28\001
4 0 0 53 -1 16 6 0.0000 4 105 435 11340 1710 WP0 @4\001
4 0 0 53 -1 16 6 0.0000 4 75 360 11340 1860 => +8\001
4 1 0 52 -1 16 8 0.4363 4 105 765 9945 6480 WU: win=32\001
4 0 0 53 -1 16 6 0.0000 4 105 495 5220 3690 WP1 @12\001
4 0 0 53 -1 16 6 0.0000 4 105 495 5220 6165 WP2 @28\001
4 0 0 53 -1 16 6 0.0000 4 105 435 5220 1620 WP0 @4\001
4 0 0 53 -1 16 6 0.0000 4 75 390 5220 1770 => +8\001
4 1 0 52 -1 16 8 0.4363 4 105 765 6795 6300 WU: win=32\001
4 1 0 52 -1 16 8 0.4363 4 105 765 6795 4050 WU: win=16\001
4 1 0 52 -1 16 8 0.4363 4 105 690 6795 1935 WU: win=8\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 1305 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 1485 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 3195 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 3465 -2 = 4\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 3825 -2 = 2\001
4 0 20 54 -1 18 6 0.0000 4 75 270 8055 4095 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 5085 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 5355 -2 = 4\001
4 0 0 53 -1 16 6 0.0000 4 105 495 8190 3645 WP1 @12\001
4 0 0 53 -1 16 6 0.0000 4 105 495 8145 6075 WP2 @28\001
4 0 0 53 -1 16 6 0.0000 4 105 435 8190 1710 WP0 @4\001
4 0 0 53 -1 16 6 0.0000 4 75 360 8190 1860 => +8\001
4 2 0 53 -1 16 6 0.0000 4 90 315 8865 4770 Pause\001
4 2 0 53 -1 16 6 0.0000 4 90 210 8865 6660 Zero\001
4 2 0 53 -1 16 6 0.0000 4 90 390 8865 6750 Window\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 5625 -2 = 3\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 6435 -2 = 4\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 6615 -2 = 2\001
4 0 20 54 -1 18 6 0.0000 4 75 270 11205 6795 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 5625 -2 = 8\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 5850 -2 = 8\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 6210 -2 = 6\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 6435 -2 = 4\001
4 0 0 54 -1 16 6 0.0000 4 75 270 8055 6615 -2 = 2\001
4 0 20 54 -1 18 6 0.0000 4 75 270 8055 6795 -2 = 0\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 5850 -2 = 7\001
4 0 0 54 -1 16 6 0.0000 4 75 270 11205 6210 -2 = 6\001
4 2 0 53 -1 16 6 0.0000 4 90 270 8910 8190 Fixed\001

File diff suppressed because it is too large Load Diff

View File

@ -548,15 +548,11 @@ buffer_almost_full | const buffer *buf| returns true if the buffer is not null
| | are used. A waiting buffer will match. | | are used. A waiting buffer will match.
--------------------+------------------+--------------------------------------- --------------------+------------------+---------------------------------------
b_alloc | buffer *buf | ensures that <buf> is allocated or b_alloc | buffer *buf | ensures that <buf> is allocated or
| enum dynbuf_crit | allocates a buffer and assigns it to | ret: buffer * | allocates a buffer and assigns it to
| criticality | *buf. If no memory is available, (1) | | *buf. If no memory is available, (1)
| ret: buffer * | is assigned instead with a zero size. | | is assigned instead with a zero size.
| | The allocated buffer is returned, or | | The allocated buffer is returned, or
| | NULL in case no memory is available. | | NULL in case no memory is available
| | The criticality indicates the how the
| | buffer might be used and how likely it
| | is that the allocated memory will be
| | quickly released.
--------------------+------------------+--------------------------------------- --------------------+------------------+---------------------------------------
__b_free | buffer *buf | releases <buf> which must be allocated __b_free | buffer *buf | releases <buf> which must be allocated
| ret: void | and marks it empty | ret: void | and marks it empty

View File

@ -1,128 +0,0 @@
2024-09-30 - Buffer List API
1. Use case
The buffer list API allows one to share a certain number of buffers between
multiple entities, which will each see their own as lists of buffers, while
keeping a shared free list. The immediate use case is for muxes, which may
want to allocate up to a certain number of buffers per connection, shared
among all streams. In this case, each stream will first request a new list
for its own use, then may request extra entries from the free list. At any
moment it will be possible to enumerate all allocated lists and to know which
buffer follows which one.
2. Representation
The buffer list is an array of struct bl_elem. It can hold up to N-1 buffers
for N elements. The first one serves as the bookkeeping head and creates the
free list.
Each bl_elem contains a struct buffer, a pointer to the next cell, and a few
flags. The struct buffer is a real struct buffer for all cells, except the
first one where it holds useful data to describe the state of the array:
struct bl_elem {
struct buffer {
size_t size; // head: size of the array in number of elements
char *area; // head: not used (0)
size_t data; // head: number of elements allocated
size_t head; // head: number of users
} buf;
uint32_t next;
uint32_t flags;
};
There are a few important properties here:
- for the free list, the first element isn't part of the list, otherwise
there wouldn't be any head storage anymore.
- the head's buf.data doesn't include the first cell of the array, thus its
maximum value is buf.size - 1.
- allocations are always made by appending to end of the existing list
- releases are always made by releasing the beginning of the existing list
- next == 0 for an allocatable cell implies that all the cells from this
element to the last one of the array are free. This allows to simply
initialize a whole new array with memset(array, 0, sizeof(array))
- next == ~0 for an allocated cell indicates we've reached the last element
of the current list.
- for the head of the list, next points to the first available cell, or 0 if
the free list is depleted.
3. Example
The array starts like this, created with a calloc() and having size initialized
to the total number of cells. The number represented is the 'next' value. "~"
here stands for ~0 (i.e. end marker).
[1|0|0|0|0|0|0|0|0|0] => array entirely free
strm1: bl_get(0) -> 1 = assign 1 to strm1's first cell
[2|~|0|0|0|0|0|0|0|0] => strm1 allocated at [1]
1
strm1: bl_get(1) -> 2 = allocate one cell after cell 1
[3|2|~|0|0|0|0|0|0|0]
1
strm1: bl_get(2) -> 3 = allocate one cell after cell 2
[4|2|3|~|0|0|0|0|0|0]
1
strm2: bl_get(0) -> 4 = assign 4 to strm2's first cell
[5|2|3|~|~|0|0|0|0|0]
1 2
strm1: bl_put(1) -> 2 = release cell 1, jump to next one (2)
[1|5|3|~|~|0|0|0|0|0]
1 2
4. Manipulating buffer lists
The API is very simple, it allows to reserve a buffer for a new stream or for
an existing one, to release a stream's first buffer or release the entire
stream, and to initialize / release the whole array.
====================+==================+=======================================
Function | Arguments/Return | Description
--------------------+------------------+---------------------------------------
bl_users() | const bl_elem *b | returns the current number of users on
| ret: uint32_t | the array (i.e. buf.head).
--------------------+------------------+---------------------------------------
bl_size() | const bl_elem *b | returns the total number of
| ret: uint32_t | allocatable cells (i.e. buf.size-1)
--------------------+------------------+---------------------------------------
bl_used() | const bl_elem *b | returns the number of cells currently
| ret: uint32_t | in use (i.e. buf.data)
--------------------+------------------+---------------------------------------
bl_avail() | const bl_elem *b | returns the number of cells still
| ret: uint32_t | available.
--------------------+------------------+---------------------------------------
bl_init() | bl_elem *b | initializes b for n elements. All are
| uint32_t n | in the free list.
--------------------+------------------+---------------------------------------
bl_put() | bl_elem *b | releases cell <idx> to the free list,
| uint32_t n | possibly deleting the user. Returns
| ret: uint32_t | next cell idx or 0 if none (last one).
--------------------+------------------+---------------------------------------
bl_deinit() | bl_elem *b | only when DEBUG_STRICT==2, scans the
| | array to check for leaks.
--------------------+------------------+---------------------------------------
bl_get() | bl_elem *b | allocates a new cell after to add to n
| uint32_t n | or a new stream. Returns the cell or 0
| ret: uint32_t | if no more space.
====================+==================+=======================================

View File

@ -1,12 +1,12 @@
----------------------------------------- -----------------------------------------
event_hdl Guide - version 3.1 event_hdl Guide - version 2.8
( Last update: 2024-06-21 ) ( Last update: 2022-11-14 )
------------------------------------------ ------------------------------------------
ABSTRACT ABSTRACT
-------- --------
The event_hdl support is a new feature of HAProxy 2.8. It is a way to easily The event_hdl support is a new feature of HAProxy 2.7. It is a way to easily
handle general events in a simple to maintain fashion, while keeping core code handle general events in a simple to maintain fashion, while keeping core code
impact to the bare minimum. impact to the bare minimum.
@ -38,7 +38,7 @@ SUMMARY
1. EVENT_HDL INTRODUCTION 1. EVENT_HDL INTRODUCTION
------------------------- -----------------------
EVENT_HDL provides two complementary APIs, both are implemented EVENT_HDL provides two complementary APIs, both are implemented
in src/event_hdl.c and include/haproxy/event_hdl(-t).h: in src/event_hdl.c and include/haproxy/event_hdl(-t).h:
@ -52,7 +52,7 @@ an event that is happening in the process.
(See section 3.) (See section 3.)
2. HOW TO HANDLE EXISTING EVENTS 2. HOW TO HANDLE EXISTING EVENTS
-------------------------------- ---------------------
To handle existing events, you must first decide which events you're To handle existing events, you must first decide which events you're
interested in. interested in.
@ -197,7 +197,7 @@ event subscription is performed using the function:
As the name implies, anonymous subscriptions don't support lookups. As the name implies, anonymous subscriptions don't support lookups.
2.1 SYNC MODE 2.1 SYNC MODE
------------- ---------------------
Example, you want to register a sync handler that will be called when Example, you want to register a sync handler that will be called when
a new server is added. a new server is added.
@ -280,12 +280,12 @@ identified subscription where freeing private is required when subscription ends
``` ```
2.2 ASYNC MODE 2.2 ASYNC MODE
-------------- ---------------------
As mentioned before, async mode comes in 2 flavors, normal and task. As mentioned before, async mode comes in 2 flavors, normal and task.
2.2.1 NORMAL VERSION 2.2.1 NORMAL VERSION
-------------------- ---------------------
Normal is meant to be really easy to use, and highly compatible with sync mode. Normal is meant to be really easy to use, and highly compatible with sync mode.
@ -379,7 +379,7 @@ identified subscription where freeing private is required when subscription ends
``` ```
2.2.2 TASK VERSION 2.2.2 TASK VERSION
------------------ ---------------------
task version requires a bit more setup, but it's pretty task version requires a bit more setup, but it's pretty
straightforward actually. straightforward actually.
@ -510,14 +510,14 @@ Note: it is not recommended to perform multiple subscriptions
that might already be freed. Thus UAF will occur. that might already be freed. Thus UAF will occur.
2.3 ADVANCED FEATURES 2.3 ADVANCED FEATURES
--------------------- -----------------------
We've already covered some of these features in the previous examples. We've already covered some of these features in the previous examples.
Here is a documented recap. Here is a documented recap.
2.3.1 SUB MGMT 2.3.1 SUB MGMT
-------------- -----------------------
From an event handler context, either sync or async mode: From an event handler context, either sync or async mode:
You have the ability to directly manage the subscription You have the ability to directly manage the subscription
@ -565,7 +565,7 @@ task and notify async modes (from the event):
``` ```
2.3.2 SUBSCRIPTION EXTERNAL LOOKUPS 2.3.2 SUBSCRIPTION EXTERNAL LOOKUPS
----------------------------------- -----------------------
As you've seen in 2.3.1, managing the subscription directly As you've seen in 2.3.1, managing the subscription directly
from the handler is a possibility. from the handler is a possibility.
@ -620,7 +620,7 @@ unsubscribing:
``` ```
2.3.3 SUBSCRIPTION PTR 2.3.3 SUBSCRIPTION PTR
---------------------- -----------------------
To manage existing subscriptions from external code, To manage existing subscriptions from external code,
we already talked about identified subscriptions that we already talked about identified subscriptions that
@ -720,7 +720,7 @@ Example:
``` ```
2.3.4 PRIVATE FREE 2.3.4 PRIVATE FREE
------------------ -----------------------
Upon handler subscription, you have the ability to provide Upon handler subscription, you have the ability to provide
a private data pointer that will be passed to the handler a private data pointer that will be passed to the handler
@ -777,7 +777,7 @@ Then:
``` ```
3 HOW TO ADD SUPPORT FOR NEW EVENTS 3 HOW TO ADD SUPPORT FOR NEW EVENTS
----------------------------------- -----------------------
Adding support for a new event is pretty straightforward. Adding support for a new event is pretty straightforward.
@ -787,20 +787,9 @@ First, you need to declare a new event subtype in event_hdl-t.h file
You might want to declare a whole new event family, in which case You might want to declare a whole new event family, in which case
you declare both the new family and the associated subtypes (if any). you declare both the new family and the associated subtypes (if any).
Up to 256 families containing 16 subtypes each are supported by the API.
Family 0 is reserved for special events, which means there are 255 usable
families.
You can declare a family using EVENT_HDL_SUB_FAMILY(x) where x is the
family.
You can declare a subtype using EVENT_HDL_SUB_TYPE(x, y) where x is the
family previously declared and y the subtype, Subtypes range from 1 to
16 (included), 0 is not a valid subtype.
``` ```
#define EVENT_HDL_SUB_NEW_FAMILY EVENT_HDL_SUB_FAMILY(4) #define EVENT_HDL_SUB_NEW_FAMILY EVENT_HDL_SUB_FAMILY(4)
#define EVENT_HDL_SUB_NEW_FAMILY_SUBTYPE_1 EVENT_HDL_SUB_TYPE(4,1) #define EVENT_HDL_SUB_NEW_FAMILY_SUBTYPE_1 EVENT_HDL_SUB_TYPE(4,0)
``` ```
Then, you need to update the event_hdl_sub_type_map map, Then, you need to update the event_hdl_sub_type_map map,
@ -814,7 +803,7 @@ Please follow this procedure:
You added a new family: go to section 3.1 You added a new family: go to section 3.1
3.1 DECLARING A NEW EVENT DATA STRUCTURE 3.1 DECLARING A NEW EVENT DATA STRUCTURE
---------------------------------------- -----------------------
You have the ability to provide additional data for a given You have the ability to provide additional data for a given
event family when such events occur. event family when such events occur.
@ -954,7 +943,7 @@ Event publishing can be performed from anywhere in the code.
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
4 SUBSCRIPTION LISTS 4 SUBSCRIPTION LISTS
-------------------- -----------------------
As you may already know, EVENT_HDL API main functions rely on As you may already know, EVENT_HDL API main functions rely on
subscription lists. subscription lists.

View File

@ -540,15 +540,14 @@ message. These functions are used by HTX analyzers or by multiplexers.
the amount of data drained. the amount of data drained.
- htx_xfer_blks() transfers HTX blocks from an HTX message to another, - htx_xfer_blks() transfers HTX blocks from an HTX message to another,
stopping after the first block of a specified type is transferred or when stopping on the first block of a specified type or when a specific amount
a specific amount of bytes, including meta-data, was moved. If the tail of bytes, including meta-data, was moved. If the tail block is a DATA
block is a DATA block, it may be partially moved. All other block are block, it may be partially moved. All other block are transferred at once
transferred at once or kept. This function returns a mixed value, with the or kept. This function returns a mixed value, with the last block moved,
last block moved, or NULL if nothing was moved, and the amount of data or NULL if nothing was moved, and the amount of data transferred. When
transferred. When HEADERS or TRAILERS blocks must be transferred, this HEADERS or TRAILERS blocks must be transferred, this function transfers
function transfers all of them. Otherwise, if it is not possible, it all of them. Otherwise, if it is not possible, it triggers an error. It is
triggers an error. It is the caller responsibility to transfer all headers the caller responsibility to transfer all headers or trailers at once.
or trailers at once.
- htx_append_msg() append an HTX message to another one. All the message is - htx_append_msg() append an HTX message to another one. All the message is
copied or nothing. So, if an error occurred, a rollback is performed. This copied or nothing. So, if an error occurred, a rollback is performed. This

View File

@ -314,16 +314,6 @@ alphanumerically ordered:
call to cfg_register_section() with the three arguments at stage call to cfg_register_section() with the three arguments at stage
STG_REGISTER. STG_REGISTER.
You can only register a section once, but you can register post callbacks
multiple time for this section with REGISTER_CONFIG_POST_SECTION().
- REGISTER_CONFIG_POST_SECTION(name, post)
Registers a function which will be called after a section is parsed. This is
the same as the <post> argument in REGISTER_CONFIG_SECTION(), the difference
is that it allows to register multiple <post> callbacks and to register them
elsewhere in the code.
- REGISTER_PER_THREAD_ALLOC(fct) - REGISTER_PER_THREAD_ALLOC(fct)
Registers a call to register_per_thread_alloc(fct) at stage STG_REGISTER. Registers a call to register_per_thread_alloc(fct) at stage STG_REGISTER.

View File

@ -1,86 +0,0 @@
2025-08-13 - Memory allocation in HAProxy 3.3
The vast majority of dynamic memory allocations are performed from pools. Pools
are optimized to store pre-calibrated objects of the right size for a given
usage, try to favor locality and hot objects as much as possible, and are
heavily instrumented to detect and help debug a wide class of bugs including
buffer overflows, use-after-free, etc.
For objects of random sizes, or those used only at configuration time, pools
are not suited, and the regular malloc/free family is available, in addition of
a few others.
The standard allocation calls are intercepted at the code level (#define) when
the code is compiled with -DDEBUG_MEM_STATS. For this reason, these calls are
redefined as macros in "bug.h", and one must not try to use the pointers to
such functions, as this may break DEBUG_MEM_STATS. This provides fine-grained
stats about allocation/free per line of source code using locally implemented
counters that can be consulted by "debug dev memstats". The calls are
categorized into one of "calloc", "free", "malloc", "realloc", "strdup",
"p_alloc", "p_free", the latter two designating pools. Extra calls such as
memalign() and similar are also intercepted and counted as malloc.
Due to the nature of this replacement, DEBUG_MEM_STATS cannot see operations
performed in libraries or dependencies.
In addition to DEBUG_MEM_STATS, when haproxy is built with USE_MEMORY_PROFILING
the standard functions are wrapped by new ones defined in "activity.c", which
also hold counters by call place. These ones are able to trace activity in
libraries because the functions check the return pointer to figure where the
call was made. The approach is different and relies on a large hash table. The
files, function names and line numbers are not known, but by passing the pointer
to dladdr(), we can often resolve most of these symbols. These operations are
consulted via "show profiling memory". It must first be enabled either in the
global config "profiling.memory on" or the CLI using "set profiling memory on".
Memory profiling can also track pool allocations and frees thanks to knowing
the size of the element and knowing a place where to store it. Some future
evolutions might consider making this possible as well for pure malloc/free
too by leveraging malloc_usable_size() a bit more.
Finally, 3.3 brought aligned allocations. These are made available via a new
family of functions around ha_aligned_alloc() that simply map to either
posix_memalign(), memalign() or _aligned_malloc() for CYGWIN, depending on
which one is available. The latter requires passing the pointer to
_aligned_free() instead of free(), so for this reason, all aligned allocations
have to be released using ha_aligned_free(). Since this mostly happens on
configuration elements, in practice it's not as inconvenient as it can sound.
These functions are in reality macros handled in "bug.h" like the previous
ones in order to deal with DEBUG_MEM_STATS. All "alloc" variants are reported
in memstats as "malloc". All "zalloc" variants are reported in memstats as
"calloc".
The currently available allocators are the following:
- void *ha_aligned_alloc(size_t align, size_t size)
- void *ha_aligned_zalloc(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment MUST be
at least as large as one word and MUST be a power of two. The "zalloc"
variant also zeroes the area on success. Both return NULL on failure.
- void *ha_aligned_alloc_safe(size_t align, size_t size)
- void *ha_aligned_zalloc_safe(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment is
automatically adjusted to the nearest larger power of two that is at least
as large as a word. The "zalloc" variant also zeroes the area on
success. Both return NULL on failure.
- (type *)ha_aligned_alloc_typed(size_t count, type)
(type *)ha_aligned_zalloc_typed(size_t count, type)
This macro returns an area aligned to the required alignment for type
<type>, large enough for <count> objects of this type, and the result is a
pointer of this type. The goal is to ease allocation of known structures
whose alignment is not necessarily known to the developer (and to avoid
encouraging to hard-code alignment). The cast in return also provides a
last-minute control in case a wrong type is mistakenly used due to a poor
copy-paste or an extra "*" after the type. When DEBUG_MEM_STATS is in use,
the type is stored as a string in the ".extra" field so that it can be
displayed in "debug dev memstats". The "zalloc" variant also zeroes the
area on success. Both return NULL on failure.
- void ha_aligned_free(void *ptr)
Frees the area pointed to by ptr. It is the equivalent of free() but for
objects allocated using one of the functions above.

Some files were not shown because too many files have changed in this diff Show More