mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2026-01-16 14:21:03 +01:00
Compare commits
No commits in common. "master" and "v3.2-dev13" have entirely different histories.
master
...
v3.2-dev13
@ -1,7 +1,7 @@
|
|||||||
FreeBSD_task:
|
FreeBSD_task:
|
||||||
freebsd_instance:
|
freebsd_instance:
|
||||||
matrix:
|
matrix:
|
||||||
image_family: freebsd-14-3
|
image_family: freebsd-14-2
|
||||||
only_if: $CIRRUS_BRANCH =~ 'master|next'
|
only_if: $CIRRUS_BRANCH =~ 'master|next'
|
||||||
install_script:
|
install_script:
|
||||||
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
|
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
|
||||||
|
|||||||
34
.github/actions/setup-vtest/action.yml
vendored
34
.github/actions/setup-vtest/action.yml
vendored
@ -1,34 +0,0 @@
|
|||||||
name: 'setup VTest'
|
|
||||||
description: 'ssss'
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: "composite"
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Setup coredumps
|
|
||||||
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
sudo sysctl -w fs.suid_dumpable=1
|
|
||||||
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
|
|
||||||
|
|
||||||
- name: Setup ulimit for core dumps
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
ulimit -c unlimited
|
|
||||||
|
|
||||||
- name: Install VTest
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
scripts/build-vtest.sh
|
|
||||||
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
shell: bash
|
|
||||||
# This allows one to more easily see which tests fail.
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
2
.github/h2spec.config
vendored
2
.github/h2spec.config
vendored
@ -19,7 +19,7 @@ defaults
|
|||||||
|
|
||||||
frontend h2
|
frontend h2
|
||||||
mode http
|
mode http
|
||||||
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
|
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
|
||||||
default_backend h2b
|
default_backend h2b
|
||||||
|
|
||||||
backend h2b
|
backend h2b
|
||||||
|
|||||||
88
.github/matrix.py
vendored
88
.github/matrix.py
vendored
@ -125,11 +125,9 @@ def main(ref_name):
|
|||||||
# Ubuntu
|
# Ubuntu
|
||||||
|
|
||||||
if "haproxy-" in ref_name:
|
if "haproxy-" in ref_name:
|
||||||
os = "ubuntu-24.04" # stable branch
|
os = "ubuntu-22.04" # stable branch
|
||||||
os_arm = "ubuntu-24.04-arm" # stable branch
|
|
||||||
else:
|
else:
|
||||||
os = "ubuntu-24.04" # development branch
|
os = "ubuntu-24.04" # development branch
|
||||||
os_arm = "ubuntu-24.04-arm" # development branch
|
|
||||||
|
|
||||||
TARGET = "linux-glibc"
|
TARGET = "linux-glibc"
|
||||||
for CC in ["gcc", "clang"]:
|
for CC in ["gcc", "clang"]:
|
||||||
@ -174,37 +172,36 @@ def main(ref_name):
|
|||||||
|
|
||||||
# ASAN
|
# ASAN
|
||||||
|
|
||||||
for os_asan in [os, os_arm]:
|
matrix.append(
|
||||||
matrix.append(
|
{
|
||||||
{
|
"name": "{}, {}, ASAN, all features".format(os, CC),
|
||||||
"name": "{}, {}, ASAN, all features".format(os_asan, CC),
|
"os": os,
|
||||||
"os": os_asan,
|
"TARGET": TARGET,
|
||||||
"TARGET": TARGET,
|
"CC": CC,
|
||||||
"CC": CC,
|
"FLAGS": [
|
||||||
"FLAGS": [
|
"USE_OBSOLETE_LINKER=1",
|
||||||
"USE_OBSOLETE_LINKER=1",
|
'ARCH_FLAGS="-g -fsanitize=address"',
|
||||||
'ARCH_FLAGS="-g -fsanitize=address"',
|
'OPT_CFLAGS="-O1"',
|
||||||
'OPT_CFLAGS="-O1"',
|
"USE_ZLIB=1",
|
||||||
"USE_ZLIB=1",
|
"USE_OT=1",
|
||||||
"USE_OT=1",
|
"OT_INC=${HOME}/opt-ot/include",
|
||||||
"OT_INC=${HOME}/opt-ot/include",
|
"OT_LIB=${HOME}/opt-ot/lib",
|
||||||
"OT_LIB=${HOME}/opt-ot/lib",
|
"OT_RUNPATH=1",
|
||||||
"OT_RUNPATH=1",
|
"USE_PCRE2=1",
|
||||||
"USE_PCRE2=1",
|
"USE_PCRE2_JIT=1",
|
||||||
"USE_PCRE2_JIT=1",
|
"USE_LUA=1",
|
||||||
"USE_LUA=1",
|
"USE_OPENSSL=1",
|
||||||
"USE_OPENSSL=1",
|
"USE_WURFL=1",
|
||||||
"USE_WURFL=1",
|
"WURFL_INC=addons/wurfl/dummy",
|
||||||
"WURFL_INC=addons/wurfl/dummy",
|
"WURFL_LIB=addons/wurfl/dummy",
|
||||||
"WURFL_LIB=addons/wurfl/dummy",
|
"USE_DEVICEATLAS=1",
|
||||||
"USE_DEVICEATLAS=1",
|
"DEVICEATLAS_SRC=addons/deviceatlas/dummy",
|
||||||
"DEVICEATLAS_SRC=addons/deviceatlas/dummy",
|
"USE_PROMEX=1",
|
||||||
"USE_PROMEX=1",
|
"USE_51DEGREES=1",
|
||||||
"USE_51DEGREES=1",
|
"51DEGREES_SRC=addons/51degrees/dummy/pattern",
|
||||||
"51DEGREES_SRC=addons/51degrees/dummy/pattern",
|
],
|
||||||
],
|
}
|
||||||
}
|
)
|
||||||
)
|
|
||||||
|
|
||||||
for compression in ["USE_ZLIB=1"]:
|
for compression in ["USE_ZLIB=1"]:
|
||||||
matrix.append(
|
matrix.append(
|
||||||
@ -221,7 +218,6 @@ def main(ref_name):
|
|||||||
"stock",
|
"stock",
|
||||||
"OPENSSL_VERSION=1.0.2u",
|
"OPENSSL_VERSION=1.0.2u",
|
||||||
"OPENSSL_VERSION=1.1.1s",
|
"OPENSSL_VERSION=1.1.1s",
|
||||||
"OPENSSL_VERSION=3.5.1",
|
|
||||||
"QUICTLS=yes",
|
"QUICTLS=yes",
|
||||||
"WOLFSSL_VERSION=5.7.0",
|
"WOLFSSL_VERSION=5.7.0",
|
||||||
"AWS_LC_VERSION=1.39.0",
|
"AWS_LC_VERSION=1.39.0",
|
||||||
@ -236,7 +232,8 @@ def main(ref_name):
|
|||||||
|
|
||||||
for ssl in ssl_versions:
|
for ssl in ssl_versions:
|
||||||
flags = ["USE_OPENSSL=1"]
|
flags = ["USE_OPENSSL=1"]
|
||||||
skipdup=0
|
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
|
||||||
|
flags.append("USE_QUIC=1")
|
||||||
if "WOLFSSL" in ssl:
|
if "WOLFSSL" in ssl:
|
||||||
flags.append("USE_OPENSSL_WOLFSSL=1")
|
flags.append("USE_OPENSSL_WOLFSSL=1")
|
||||||
if "AWS_LC" in ssl:
|
if "AWS_LC" in ssl:
|
||||||
@ -246,23 +243,8 @@ def main(ref_name):
|
|||||||
flags.append("SSL_INC=${HOME}/opt/include")
|
flags.append("SSL_INC=${HOME}/opt/include")
|
||||||
if "LIBRESSL" in ssl and "latest" in ssl:
|
if "LIBRESSL" in ssl and "latest" in ssl:
|
||||||
ssl = determine_latest_libressl(ssl)
|
ssl = determine_latest_libressl(ssl)
|
||||||
skipdup=1
|
|
||||||
if "OPENSSL" in ssl and "latest" in ssl:
|
if "OPENSSL" in ssl and "latest" in ssl:
|
||||||
ssl = determine_latest_openssl(ssl)
|
ssl = determine_latest_openssl(ssl)
|
||||||
skipdup=1
|
|
||||||
|
|
||||||
# if "latest" equals a version already in the list
|
|
||||||
if ssl in ssl_versions and skipdup == 1:
|
|
||||||
continue
|
|
||||||
|
|
||||||
openssl_supports_quic = False
|
|
||||||
try:
|
|
||||||
openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
|
|
||||||
flags.append("USE_QUIC=1")
|
|
||||||
|
|
||||||
matrix.append(
|
matrix.append(
|
||||||
{
|
{
|
||||||
@ -280,7 +262,7 @@ def main(ref_name):
|
|||||||
if "haproxy-" in ref_name:
|
if "haproxy-" in ref_name:
|
||||||
os = "macos-13" # stable branch
|
os = "macos-13" # stable branch
|
||||||
else:
|
else:
|
||||||
os = "macos-26" # development branch
|
os = "macos-15" # development branch
|
||||||
|
|
||||||
TARGET = "osx"
|
TARGET = "osx"
|
||||||
for CC in ["clang"]:
|
for CC in ["clang"]:
|
||||||
|
|||||||
80
.github/workflows/aws-lc-fips.yml
vendored
80
.github/workflows/aws-lc-fips.yml
vendored
@ -5,8 +5,82 @@ on:
|
|||||||
- cron: "0 0 * * 4"
|
- cron: "0 0 * * 4"
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
uses: ./.github/workflows/aws-lc-template.yml
|
runs-on: ubuntu-latest
|
||||||
with:
|
steps:
|
||||||
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
|
- name: Determine latest AWS-LC release
|
||||||
|
id: get_aws_lc_release
|
||||||
|
run: |
|
||||||
|
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))")
|
||||||
|
echo $result
|
||||||
|
echo "result=$result" >> $GITHUB_OUTPUT
|
||||||
|
- name: Cache AWS-LC
|
||||||
|
id: cache_aws_lc
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: '~/opt/'
|
||||||
|
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
||||||
|
- name: Install apt dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
|
sudo apt-get --no-install-recommends -y install socat gdb
|
||||||
|
- name: Install AWS-LC
|
||||||
|
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||||
|
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
||||||
|
- name: Compile HAProxy
|
||||||
|
run: |
|
||||||
|
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||||
|
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
||||||
|
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||||
|
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||||
|
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||||
|
sudo make install
|
||||||
|
- name: Show HAProxy version
|
||||||
|
id: show-version
|
||||||
|
run: |
|
||||||
|
ldd $(which haproxy)
|
||||||
|
haproxy -vv
|
||||||
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
|
- name: Run VTest for HAProxy
|
||||||
|
id: vtest
|
||||||
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
# allow to catch coredumps
|
||||||
|
ulimit -c unlimited
|
||||||
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
|
- name: Show VTest results
|
||||||
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
run: |
|
||||||
|
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
||||||
|
printf "::group::"
|
||||||
|
cat $folder/INFO
|
||||||
|
cat $folder/LOG
|
||||||
|
echo "::endgroup::"
|
||||||
|
done
|
||||||
|
exit 1
|
||||||
|
- name: Show coredumps
|
||||||
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
run: |
|
||||||
|
failed=false
|
||||||
|
shopt -s nullglob
|
||||||
|
for file in /tmp/core.*; do
|
||||||
|
failed=true
|
||||||
|
printf "::group::"
|
||||||
|
gdb -ex 'thread apply all bt full' ./haproxy $file
|
||||||
|
echo "::endgroup::"
|
||||||
|
done
|
||||||
|
if [ "$failed" = true ]; then
|
||||||
|
exit 1;
|
||||||
|
fi
|
||||||
|
|||||||
94
.github/workflows/aws-lc-template.yml
vendored
94
.github/workflows/aws-lc-template.yml
vendored
@ -1,94 +0,0 @@
|
|||||||
name: AWS-LC template
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
command:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Determine latest AWS-LC release
|
|
||||||
id: get_aws_lc_release
|
|
||||||
run: |
|
|
||||||
result=$(cd .github && python3 -c "${{ inputs.command }}")
|
|
||||||
echo $result
|
|
||||||
echo "result=$result" >> $GITHUB_OUTPUT
|
|
||||||
- name: Cache AWS-LC
|
|
||||||
id: cache_aws_lc
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: '~/opt/'
|
|
||||||
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb jose
|
|
||||||
- name: Install AWS-LC
|
|
||||||
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
|
||||||
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- uses: ./.github/actions/setup-vtest
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
- name: Show Unit-Tests results
|
|
||||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $result
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
80
.github/workflows/aws-lc.yml
vendored
80
.github/workflows/aws-lc.yml
vendored
@ -5,8 +5,82 @@ on:
|
|||||||
- cron: "0 0 * * 4"
|
- cron: "0 0 * * 4"
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
uses: ./.github/workflows/aws-lc-template.yml
|
runs-on: ubuntu-latest
|
||||||
with:
|
steps:
|
||||||
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
|
- name: Determine latest AWS-LC release
|
||||||
|
id: get_aws_lc_release
|
||||||
|
run: |
|
||||||
|
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
|
||||||
|
echo $result
|
||||||
|
echo "result=$result" >> $GITHUB_OUTPUT
|
||||||
|
- name: Cache AWS-LC
|
||||||
|
id: cache_aws_lc
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: '~/opt/'
|
||||||
|
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
||||||
|
- name: Install apt dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
|
sudo apt-get --no-install-recommends -y install socat gdb
|
||||||
|
- name: Install AWS-LC
|
||||||
|
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||||
|
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
||||||
|
- name: Compile HAProxy
|
||||||
|
run: |
|
||||||
|
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||||
|
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
||||||
|
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||||
|
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||||
|
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||||
|
sudo make install
|
||||||
|
- name: Show HAProxy version
|
||||||
|
id: show-version
|
||||||
|
run: |
|
||||||
|
ldd $(which haproxy)
|
||||||
|
haproxy -vv
|
||||||
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
|
- name: Run VTest for HAProxy
|
||||||
|
id: vtest
|
||||||
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
# allow to catch coredumps
|
||||||
|
ulimit -c unlimited
|
||||||
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
|
- name: Show VTest results
|
||||||
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
run: |
|
||||||
|
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
||||||
|
printf "::group::"
|
||||||
|
cat $folder/INFO
|
||||||
|
cat $folder/LOG
|
||||||
|
echo "::endgroup::"
|
||||||
|
done
|
||||||
|
exit 1
|
||||||
|
- name: Show coredumps
|
||||||
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
run: |
|
||||||
|
failed=false
|
||||||
|
shopt -s nullglob
|
||||||
|
for file in /tmp/core.*; do
|
||||||
|
failed=true
|
||||||
|
printf "::group::"
|
||||||
|
gdb -ex 'thread apply all bt full' ./haproxy $file
|
||||||
|
echo "::endgroup::"
|
||||||
|
done
|
||||||
|
if [ "$failed" = true ]; then
|
||||||
|
exit 1;
|
||||||
|
fi
|
||||||
|
|||||||
2
.github/workflows/codespell.yml
vendored
2
.github/workflows/codespell.yml
vendored
@ -13,7 +13,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- uses: codespell-project/codespell-problem-matcher@v1.2.0
|
- uses: codespell-project/codespell-problem-matcher@v1.2.0
|
||||||
- uses: codespell-project/actions-codespell@master
|
- uses: codespell-project/actions-codespell@master
|
||||||
with:
|
with:
|
||||||
|
|||||||
17
.github/workflows/compliance.yml
vendored
17
.github/workflows/compliance.yml
vendored
@ -11,10 +11,15 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
h2spec:
|
h2spec:
|
||||||
name: h2spec
|
name: h2spec
|
||||||
runs-on: ubuntu-latest
|
runs-on: ${{ matrix.os }}
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- TARGET: linux-glibc
|
||||||
|
CC: gcc
|
||||||
|
os: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install h2spec
|
- name: Install h2spec
|
||||||
id: install-h2spec
|
id: install-h2spec
|
||||||
run: |
|
run: |
|
||||||
@ -23,12 +28,12 @@ jobs:
|
|||||||
tar xvf h2spec.tar.gz
|
tar xvf h2spec.tar.gz
|
||||||
sudo install -m755 h2spec /usr/local/bin/h2spec
|
sudo install -m755 h2spec /usr/local/bin/h2spec
|
||||||
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
|
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
|
||||||
- name: Compile HAProxy with gcc
|
- name: Compile HAProxy with ${{ matrix.CC }}
|
||||||
run: |
|
run: |
|
||||||
make -j$(nproc) all \
|
make -j$(nproc) all \
|
||||||
ERR=1 \
|
ERR=1 \
|
||||||
TARGET=linux-glibc \
|
TARGET=${{ matrix.TARGET }} \
|
||||||
CC=gcc \
|
CC=${{ matrix.CC }} \
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||||
USE_OPENSSL=1
|
USE_OPENSSL=1
|
||||||
sudo make install
|
sudo make install
|
||||||
|
|||||||
2
.github/workflows/contrib.yml
vendored
2
.github/workflows/contrib.yml
vendored
@ -10,7 +10,7 @@ jobs:
|
|||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Compile admin/halog/halog
|
- name: Compile admin/halog/halog
|
||||||
run: |
|
run: |
|
||||||
make admin/halog/halog
|
make admin/halog/halog
|
||||||
|
|||||||
4
.github/workflows/coverity.yml
vendored
4
.github/workflows/coverity.yml
vendored
@ -17,7 +17,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
@ -38,7 +38,7 @@ jobs:
|
|||||||
- name: Build with Coverity build tool
|
- name: Build with Coverity build tool
|
||||||
run: |
|
run: |
|
||||||
export PATH=`pwd`/coverity_tool/bin:$PATH
|
export PATH=`pwd`/coverity_tool/bin:$PATH
|
||||||
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1
|
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
|
||||||
- name: Submit build result to Coverity Scan
|
- name: Submit build result to Coverity Scan
|
||||||
run: |
|
run: |
|
||||||
tar czvf cov.tar.gz cov-int
|
tar czvf cov.tar.gz cov-int
|
||||||
|
|||||||
2
.github/workflows/cross-zoo.yml
vendored
2
.github/workflows/cross-zoo.yml
vendored
@ -99,7 +99,7 @@ jobs:
|
|||||||
sudo apt-get -yq --force-yes install \
|
sudo apt-get -yq --force-yes install \
|
||||||
gcc-${{ matrix.platform.arch }} \
|
gcc-${{ matrix.platform.arch }} \
|
||||||
${{ matrix.platform.libs }}
|
${{ matrix.platform.libs }}
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
|
||||||
- name: install quictls
|
- name: install quictls
|
||||||
|
|||||||
8
.github/workflows/fedora-rawhide.yml
vendored
8
.github/workflows/fedora-rawhide.yml
vendored
@ -18,19 +18,19 @@ jobs:
|
|||||||
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
||||||
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
||||||
]
|
]
|
||||||
fail-fast: false
|
|
||||||
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
container:
|
container:
|
||||||
image: fedora:rawhide
|
image: fedora:rawhide
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
|
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
|
||||||
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
|
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install VTest
|
||||||
|
run: scripts/build-vtest.sh
|
||||||
- name: Install QuicTLS
|
- name: Install QuicTLS
|
||||||
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
|
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
|
||||||
- name: Build contrib tools
|
- name: Build contrib tools
|
||||||
@ -67,4 +67,4 @@ jobs:
|
|||||||
- name: Run Unit tests
|
- name: Run Unit tests
|
||||||
id: unittests
|
id: unittests
|
||||||
run: |
|
run: |
|
||||||
make unit-tests
|
make unit-tests
|
||||||
2
.github/workflows/illumos.yml
vendored
2
.github/workflows/illumos.yml
vendored
@ -13,7 +13,7 @@ jobs:
|
|||||||
contents: read
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- name: "Checkout repository"
|
- name: "Checkout repository"
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: "Build on VM"
|
- name: "Build on VM"
|
||||||
uses: vmactions/solaris-vm@v1
|
uses: vmactions/solaris-vm@v1
|
||||||
|
|||||||
20
.github/workflows/musl.yml
vendored
20
.github/workflows/musl.yml
vendored
@ -20,13 +20,13 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
ulimit -c unlimited
|
ulimit -c unlimited
|
||||||
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
|
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
|
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
|
||||||
- name: Install VTest
|
- name: Install VTest
|
||||||
run: scripts/build-vtest.sh
|
run: scripts/build-vtest.sh
|
||||||
- name: Build
|
- name: Build
|
||||||
run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
|
run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
|
||||||
- name: Show version
|
- name: Show version
|
||||||
run: ./haproxy -vv
|
run: ./haproxy -vv
|
||||||
- name: Show linked libraries
|
- name: Show linked libraries
|
||||||
@ -37,10 +37,6 @@ jobs:
|
|||||||
- name: Run VTest
|
- name: Run VTest
|
||||||
id: vtest
|
id: vtest
|
||||||
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
- name: Show coredumps
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
run: |
|
run: |
|
||||||
@ -64,13 +60,3 @@ jobs:
|
|||||||
cat $folder/LOG
|
cat $folder/LOG
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
done
|
done
|
||||||
- name: Show Unit-Tests results
|
|
||||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $result
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
|
|||||||
2
.github/workflows/netbsd.yml
vendored
2
.github/workflows/netbsd.yml
vendored
@ -13,7 +13,7 @@ jobs:
|
|||||||
contents: read
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- name: "Checkout repository"
|
- name: "Checkout repository"
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: "Build on VM"
|
- name: "Build on VM"
|
||||||
uses: vmactions/netbsd-vm@v1
|
uses: vmactions/netbsd-vm@v1
|
||||||
|
|||||||
82
.github/workflows/openssl-ech.yml
vendored
82
.github/workflows/openssl-ech.yml
vendored
@ -1,82 +0,0 @@
|
|||||||
name: openssl ECH
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install VTest
|
|
||||||
run: |
|
|
||||||
scripts/build-vtest.sh
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
|
||||||
sudo apt-get --no-install-recommends -y install libpsl-dev
|
|
||||||
- name: Install OpenSSL+ECH
|
|
||||||
run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
|
|
||||||
- name: Install curl+ECH
|
|
||||||
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
|
||||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
# allow to catch coredumps
|
|
||||||
ulimit -c unlimited
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
77
.github/workflows/openssl-master.yml
vendored
77
.github/workflows/openssl-master.yml
vendored
@ -1,77 +0,0 @@
|
|||||||
name: openssl master
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
|
||||||
sudo apt-get --no-install-recommends -y install libpsl-dev
|
|
||||||
- uses: ./.github/actions/setup-vtest
|
|
||||||
- name: Install OpenSSL master
|
|
||||||
run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_QUIC=1 USE_OPENSSL=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
# allow to catch coredumps
|
|
||||||
ulimit -c unlimited
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
34
.github/workflows/openssl-nodeprecated.yml
vendored
Normal file
34
.github/workflows/openssl-nodeprecated.yml
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
#
|
||||||
|
# special purpose CI: test against OpenSSL built in "no-deprecated" mode
|
||||||
|
# let us run those builds weekly
|
||||||
|
#
|
||||||
|
# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
|
||||||
|
# GH: https://github.com/haproxy/haproxy/issues/367
|
||||||
|
|
||||||
|
name: openssl no-deprecated
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "0 0 * * 4"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
|
- name: Compile HAProxy
|
||||||
|
run: |
|
||||||
|
make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
|
||||||
|
- name: Run VTest
|
||||||
|
run: |
|
||||||
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
6
.github/workflows/quic-interop-aws-lc.yml
vendored
6
.github/workflows/quic-interop-aws-lc.yml
vendored
@ -19,7 +19,7 @@ jobs:
|
|||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
- name: Log in to the Container registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
@ -35,7 +35,7 @@ jobs:
|
|||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
push: true
|
push: true
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=AWS-LC
|
SSLLIB: AWS-LC
|
||||||
tags: ghcr.io/${{ github.repository }}:aws-lc
|
tags: ghcr.io/${{ github.repository }}:aws-lc
|
||||||
|
|
||||||
- name: Cleanup registry
|
- name: Cleanup registry
|
||||||
@ -64,7 +64,7 @@ jobs:
|
|||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
- name: Log in to the Container registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
|
|||||||
6
.github/workflows/quic-interop-libressl.yml
vendored
6
.github/workflows/quic-interop-libressl.yml
vendored
@ -19,7 +19,7 @@ jobs:
|
|||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
- name: Log in to the Container registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
@ -35,7 +35,7 @@ jobs:
|
|||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
push: true
|
push: true
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=LibreSSL
|
SSLLIB: LibreSSL
|
||||||
tags: ghcr.io/${{ github.repository }}:libressl
|
tags: ghcr.io/${{ github.repository }}:libressl
|
||||||
|
|
||||||
- name: Cleanup registry
|
- name: Cleanup registry
|
||||||
@ -62,7 +62,7 @@ jobs:
|
|||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
- name: Log in to the Container registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
|
|||||||
14
.github/workflows/quictls.yml
vendored
14
.github/workflows/quictls.yml
vendored
@ -15,9 +15,11 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
@ -39,10 +41,16 @@ jobs:
|
|||||||
ldd $(which haproxy)
|
ldd $(which haproxy)
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy
|
- name: Run VTest for HAProxy
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
# allow to catch coredumps
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Show VTest results
|
- name: Show VTest results
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
|||||||
34
.github/workflows/vtest.yml
vendored
34
.github/workflows/vtest.yml
vendored
@ -23,7 +23,7 @@ jobs:
|
|||||||
outputs:
|
outputs:
|
||||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Generate Build Matrix
|
- name: Generate Build Matrix
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
@ -44,10 +44,16 @@ jobs:
|
|||||||
TMPDIR: /tmp
|
TMPDIR: /tmp
|
||||||
OT_CPP_VERSION: 1.6.0
|
OT_CPP_VERSION: 1.6.0
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 100
|
fetch-depth: 100
|
||||||
|
|
||||||
|
- name: Setup coredumps
|
||||||
|
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
||||||
|
run: |
|
||||||
|
sudo sysctl -w fs.suid_dumpable=1
|
||||||
|
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
|
||||||
|
|
||||||
#
|
#
|
||||||
# Github Action cache key cannot contain comma, so we calculate it based on job name
|
# Github Action cache key cannot contain comma, so we calculate it based on job name
|
||||||
#
|
#
|
||||||
@ -70,7 +76,7 @@ jobs:
|
|||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
path: '~/opt-ot/'
|
path: '~/opt-ot/'
|
||||||
key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
|
key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
||||||
run: |
|
run: |
|
||||||
@ -87,7 +93,9 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
brew install socat
|
brew install socat
|
||||||
brew install lua
|
brew install lua
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Install SSL ${{ matrix.ssl }}
|
- name: Install SSL ${{ matrix.ssl }}
|
||||||
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
|
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||||
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
|
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
|
||||||
@ -113,16 +121,7 @@ jobs:
|
|||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
||||||
${{ join(matrix.FLAGS, ' ') }} \
|
${{ join(matrix.FLAGS, ' ') }} \
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||||
sudo make install-bin
|
sudo make install
|
||||||
- name: Compile admin/halog/halog
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) admin/halog/halog \
|
|
||||||
ERR=1 \
|
|
||||||
TARGET=${{ matrix.TARGET }} \
|
|
||||||
CC=${{ matrix.CC }} \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
${{ join(matrix.FLAGS, ' ') }} \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
- name: Show HAProxy version
|
- name: Show HAProxy version
|
||||||
id: show-version
|
id: show-version
|
||||||
run: |
|
run: |
|
||||||
@ -137,9 +136,16 @@ jobs:
|
|||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
|
- name: Install problem matcher for VTest
|
||||||
|
# This allows one to more easily see which tests fail.
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Show VTest results
|
- name: Show VTest results
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
|||||||
2
.github/workflows/windows.yml
vendored
2
.github/workflows/windows.yml
vendored
@ -35,7 +35,7 @@ jobs:
|
|||||||
- USE_THREAD=1
|
- USE_THREAD=1
|
||||||
- USE_ZLIB=1
|
- USE_ZLIB=1
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- uses: msys2/setup-msys2@v2
|
- uses: msys2/setup-msys2@v2
|
||||||
with:
|
with:
|
||||||
install: >-
|
install: >-
|
||||||
|
|||||||
32
.github/workflows/wolfssl.yml
vendored
32
.github/workflows/wolfssl.yml
vendored
@ -11,13 +11,15 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb jose
|
sudo apt-get --no-install-recommends -y install socat gdb
|
||||||
- name: Install WolfSSL
|
- name: Install WolfSSL
|
||||||
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
|
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
|
||||||
- name: Compile HAProxy
|
- name: Compile HAProxy
|
||||||
@ -25,7 +27,7 @@ jobs:
|
|||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||||
USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
|
USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
||||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
||||||
sudo make install
|
sudo make install
|
||||||
@ -35,15 +37,17 @@ jobs:
|
|||||||
ldd $(which haproxy)
|
ldd $(which haproxy)
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy
|
- name: Run VTest for HAProxy
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
# allow to catch coredumps
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show VTest results
|
- name: Show VTest results
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
run: |
|
run: |
|
||||||
@ -68,13 +72,3 @@ jobs:
|
|||||||
if [ "$failed" = true ]; then
|
if [ "$failed" = true ]; then
|
||||||
exit 1;
|
exit 1;
|
||||||
fi
|
fi
|
||||||
- name: Show Unit-Tests results
|
|
||||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $result
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
|
|||||||
12
BRANCHES
12
BRANCHES
@ -171,17 +171,7 @@ feedback for developers:
|
|||||||
as the previous releases that had 6 months to stabilize. In terms of
|
as the previous releases that had 6 months to stabilize. In terms of
|
||||||
stability it really means that the point zero version already accumulated
|
stability it really means that the point zero version already accumulated
|
||||||
6 months of fixes and that it is much safer to use even just after it is
|
6 months of fixes and that it is much safer to use even just after it is
|
||||||
released. There is one exception though, features marked as "experimental"
|
released.
|
||||||
are not guaranteed to be maintained beyond the release of the next LTS
|
|
||||||
branch. The rationale here is that the experimental status is made to
|
|
||||||
expose an early preview of a feature, that is often incomplete, not always
|
|
||||||
in its definitive form regarding configuration, and for which developers
|
|
||||||
are seeking feedback from the users. It is even possible that changes will
|
|
||||||
be brought within the stable branch and it may happen that the feature
|
|
||||||
breaks. It is not imaginable to always be able to backport bug fixes too
|
|
||||||
far in this context since the code and configuration may change quite a
|
|
||||||
bit. Users who want to try experimental features are expected to upgrade
|
|
||||||
quickly to benefit from the improvements made to that feature.
|
|
||||||
|
|
||||||
- for developers, given that the odd versions are solely used by highly
|
- for developers, given that the odd versions are solely used by highly
|
||||||
skilled users, it's easier to get advanced traces and captures, and there
|
skilled users, it's easier to get advanced traces and captures, and there
|
||||||
|
|||||||
45
INSTALL
45
INSTALL
@ -111,7 +111,7 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
|
|||||||
may want to retry with "gmake" which is the name commonly used for GNU make
|
may want to retry with "gmake" which is the name commonly used for GNU make
|
||||||
on BSD systems.
|
on BSD systems.
|
||||||
|
|
||||||
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
|
- GCC >= 4.7 (up to 14 tested). Older versions are no longer supported due to
|
||||||
the latest mt_list update which only uses c11-like atomics. Newer versions
|
the latest mt_list update which only uses c11-like atomics. Newer versions
|
||||||
may sometimes break due to compiler regressions or behaviour changes. The
|
may sometimes break due to compiler regressions or behaviour changes. The
|
||||||
version shipped with your operating system is very likely to work with no
|
version shipped with your operating system is very likely to work with no
|
||||||
@ -237,7 +237,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
|
|||||||
-----------------
|
-----------------
|
||||||
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
||||||
supports the OpenSSL library, and is known to build and work with branches
|
supports the OpenSSL library, and is known to build and work with branches
|
||||||
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
|
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.4. It is recommended to use
|
||||||
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
||||||
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
||||||
and each of the branches above receives its own fixes, without forcing you to
|
and each of the branches above receives its own fixes, without forcing you to
|
||||||
@ -259,15 +259,11 @@ reported to work as well. While there are some efforts from the community to
|
|||||||
ensure they work well, OpenSSL remains the primary target and this means that
|
ensure they work well, OpenSSL remains the primary target and this means that
|
||||||
in case of conflicting choices, OpenSSL support will be favored over other
|
in case of conflicting choices, OpenSSL support will be favored over other
|
||||||
options. Note that QUIC is not fully supported when haproxy is built with
|
options. Note that QUIC is not fully supported when haproxy is built with
|
||||||
OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
|
OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
|
||||||
alternatives. As of writing this, the QuicTLS project follows OpenSSL very
|
this, the QuicTLS project follows OpenSSL very closely and provides update
|
||||||
closely and provides update simultaneously, but being a volunteer-driven
|
simultaneously, but being a volunteer-driven project, its long-term future does
|
||||||
project, its long-term future does not look certain enough to convince
|
not look certain enough to convince operating systems to package it, so it
|
||||||
operating systems to package it, so it needs to be build locally. Recent
|
needs to be build locally. See the section about QUIC in this document.
|
||||||
versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
|
|
||||||
generally more performant than other OpenSSL derivatives, but may behave
|
|
||||||
slightly differently, particularly when dealing with outdated setups. See
|
|
||||||
the section about QUIC in this document.
|
|
||||||
|
|
||||||
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
|
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
|
||||||
supported alternative stack not based on OpenSSL, yet which implements almost
|
supported alternative stack not based on OpenSSL, yet which implements almost
|
||||||
@ -504,11 +500,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
|
|||||||
protocol stack is currently supported as an experimental feature in haproxy on
|
protocol stack is currently supported as an experimental feature in haproxy on
|
||||||
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
|
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
|
||||||
|
|
||||||
Note that QUIC is not always fully supported by the OpenSSL library depending on
|
Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
|
||||||
its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before
|
cannot be supported by OpenSSL contrary to others libraries with full QUIC
|
||||||
3.5 contrary to others libraries with full QUIC support. The preferred option is
|
support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
|
||||||
to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its
|
a QUIC-compatible API. Its repository is available at this location:
|
||||||
repository is available at this location:
|
|
||||||
|
|
||||||
https://github.com/quictls/openssl
|
https://github.com/quictls/openssl
|
||||||
|
|
||||||
@ -536,18 +531,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
|
|||||||
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
|
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
|
||||||
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
|
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
|
||||||
|
|
||||||
As last resort, haproxy may be compiled against OpenSSL as follows from 3.5
|
As last resort, haproxy may be compiled against OpenSSL as follows:
|
||||||
version with 0-RTT support:
|
|
||||||
|
|
||||||
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
|
|
||||||
|
|
||||||
or as follows for all OpenSSL versions but without O-RTT support:
|
|
||||||
|
|
||||||
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
|
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
|
||||||
|
|
||||||
In addition to this requirements, the QUIC listener bindings must be explicitly
|
Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
|
||||||
enabled with a specific QUIC tuning parameter. (see "limited-quic" global
|
OpenSSL. In addition to this compilation requirements, the QUIC listener
|
||||||
parameter of haproxy Configuration Manual).
|
bindings must be explicitly enabled with a specific QUIC tuning parameter.
|
||||||
|
(see "limited-quic" global parameter of haproxy Configuration Manual).
|
||||||
|
|
||||||
|
|
||||||
5) How to build HAProxy
|
5) How to build HAProxy
|
||||||
@ -563,9 +554,9 @@ It goes into more details with the main options.
|
|||||||
To build haproxy, you have to choose your target OS amongst the following ones
|
To build haproxy, you have to choose your target OS amongst the following ones
|
||||||
and assign it to the TARGET variable :
|
and assign it to the TARGET variable :
|
||||||
|
|
||||||
- linux-glibc for Linux kernel 4.17 and above
|
- linux-glibc for Linux kernel 2.6.28 and above
|
||||||
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
||||||
- linux-musl for Linux kernel 4.17 and above with musl libc
|
- linux-musl for Linux kernel 2.6.28 and above with musl libc
|
||||||
- solaris for Solaris 10 and above
|
- solaris for Solaris 10 and above
|
||||||
- freebsd for FreeBSD 10 and above
|
- freebsd for FreeBSD 10 and above
|
||||||
- dragonfly for DragonFlyBSD 4.3 and above
|
- dragonfly for DragonFlyBSD 4.3 and above
|
||||||
|
|||||||
56
Makefile
56
Makefile
@ -35,7 +35,6 @@
|
|||||||
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
|
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
|
||||||
# USE_OPENSSL_AWSLC : enable use of AWS-LC
|
# USE_OPENSSL_AWSLC : enable use of AWS-LC
|
||||||
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
|
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
|
||||||
# USE_ECH : enable use of ECH with the OpenSSL API
|
|
||||||
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
|
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
|
||||||
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
|
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
|
||||||
# USE_ENGINE : enable use of OpenSSL Engine.
|
# USE_ENGINE : enable use of OpenSSL Engine.
|
||||||
@ -63,8 +62,6 @@
|
|||||||
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
|
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
|
||||||
# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
|
# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
|
||||||
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
|
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
|
||||||
# USE_SHM_OPEN : use shm_open() for features that can make use of shared memory
|
|
||||||
# USE_KTLS : use kTLS.(requires at least Linux 4.17).
|
|
||||||
#
|
#
|
||||||
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
|
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
|
||||||
# "USE_xxx=" (empty string). The list of enabled and disabled options for a
|
# "USE_xxx=" (empty string). The list of enabled and disabled options for a
|
||||||
@ -214,8 +211,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
|
|||||||
# undefined behavior to silently produce invalid code. For this reason we have
|
# undefined behavior to silently produce invalid code. For this reason we have
|
||||||
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
|
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
|
||||||
# It is preferable not to change this option in order to avoid breakage.
|
# It is preferable not to change this option in order to avoid breakage.
|
||||||
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \
|
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)
|
||||||
$(call cc-opt,-fvect-cost-model=very-cheap)
|
|
||||||
|
|
||||||
#### Compiler-specific flags to enable certain classes of warnings.
|
#### Compiler-specific flags to enable certain classes of warnings.
|
||||||
# Some are hard-coded, others are enabled only if supported.
|
# Some are hard-coded, others are enabled only if supported.
|
||||||
@ -342,16 +338,14 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
|
|||||||
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
|
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
|
||||||
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
|
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
|
||||||
USE_ECH \
|
|
||||||
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
|
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
|
||||||
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
|
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
|
||||||
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
|
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
|
||||||
USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
|
USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
|
||||||
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
|
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
|
||||||
USE_MEMORY_PROFILING USE_SHM_OPEN \
|
USE_MEMORY_PROFILING \
|
||||||
USE_STATIC_PCRE USE_STATIC_PCRE2 \
|
USE_STATIC_PCRE USE_STATIC_PCRE2 \
|
||||||
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT \
|
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
|
||||||
USE_QUIC_OPENSSL_COMPAT USE_KTLS
|
|
||||||
|
|
||||||
# preset all variables for all supported build options among use_opts
|
# preset all variables for all supported build options among use_opts
|
||||||
$(reset_opts_vars)
|
$(reset_opts_vars)
|
||||||
@ -382,13 +376,13 @@ ifeq ($(TARGET),haiku)
|
|||||||
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
|
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# For linux >= 4.17 and glibc
|
# For linux >= 2.6.28 and glibc
|
||||||
ifeq ($(TARGET),linux-glibc)
|
ifeq ($(TARGET),linux-glibc)
|
||||||
set_target_defaults = $(call default_opts, \
|
set_target_defaults = $(call default_opts, \
|
||||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
||||||
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
||||||
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
|
USE_GETADDRINFO USE_BACKTRACE)
|
||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
@ -401,13 +395,13 @@ ifeq ($(TARGET),linux-glibc-legacy)
|
|||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# For linux >= 4.17 and musl
|
# For linux >= 2.6.28 and musl
|
||||||
ifeq ($(TARGET),linux-musl)
|
ifeq ($(TARGET),linux-musl)
|
||||||
set_target_defaults = $(call default_opts, \
|
set_target_defaults = $(call default_opts, \
|
||||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
||||||
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
||||||
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
|
USE_GETADDRINFO USE_BACKTRACE)
|
||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
@ -601,10 +595,6 @@ ifneq ($(USE_BACKTRACE:0=),)
|
|||||||
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
|
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_MEMORY_PROFILING:0=),)
|
|
||||||
MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(USE_CPU_AFFINITY:0=),)
|
ifneq ($(USE_CPU_AFFINITY:0=),)
|
||||||
OPTIONS_OBJS += src/cpuset.o
|
OPTIONS_OBJS += src/cpuset.o
|
||||||
OPTIONS_OBJS += src/cpu_topo.o
|
OPTIONS_OBJS += src/cpu_topo.o
|
||||||
@ -643,7 +633,7 @@ ifneq ($(USE_OPENSSL:0=),)
|
|||||||
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
|
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
|
||||||
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
|
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
|
||||||
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
|
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
|
||||||
src/ssl_trace.o src/jwe.o
|
src/ssl_trace.o
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_ENGINE:0=),)
|
ifneq ($(USE_ENGINE:0=),)
|
||||||
@ -670,7 +660,7 @@ OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
|
|||||||
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
|
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
|
||||||
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
|
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
|
||||||
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
|
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
|
||||||
src/quic_enc.o
|
src/cbuf.o src/quic_enc.o
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
|
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
|
||||||
@ -969,15 +959,15 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
|||||||
src/cache.o src/stconn.o src/http_htx.o src/debug.o \
|
src/cache.o src/stconn.o src/http_htx.o src/debug.o \
|
||||||
src/check.o src/stats-html.o src/haproxy.o src/listener.o \
|
src/check.o src/stats-html.o src/haproxy.o src/listener.o \
|
||||||
src/applet.o src/pattern.o src/cfgparse-listen.o \
|
src/applet.o src/pattern.o src/cfgparse-listen.o \
|
||||||
src/flt_spoe.o src/cebis_tree.o src/http_ext.o \
|
src/flt_spoe.o src/cebuis_tree.o src/http_ext.o \
|
||||||
src/http_act.o src/http_fetch.o src/cebs_tree.o \
|
src/http_act.o src/http_fetch.o src/cebus_tree.o \
|
||||||
src/cebib_tree.o src/http_client.o src/dns.o \
|
src/cebuib_tree.o src/http_client.o src/dns.o \
|
||||||
src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
|
src/cebub_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
|
||||||
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
|
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
|
||||||
src/cfgparse-global.o src/filters.o src/mux_pt.o \
|
src/cfgparse-global.o src/filters.o src/mux_pt.o \
|
||||||
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
|
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
|
||||||
src/ceba_tree.o src/session.o src/payload.o src/htx.o \
|
src/cebua_tree.o src/session.o src/payload.o src/htx.o \
|
||||||
src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \
|
src/cebul_tree.o src/cebu32_tree.o src/cebu64_tree.o \
|
||||||
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
|
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
|
||||||
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
|
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
|
||||||
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
|
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
|
||||||
@ -992,9 +982,9 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
|||||||
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
|
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
|
||||||
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
|
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
|
||||||
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
|
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
|
||||||
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \
|
src/lb_map.o src/shctx.o src/mworker-prog.o src/hpack-dec.o \
|
||||||
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
|
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
|
||||||
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
|
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o \
|
||||||
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
|
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
|
||||||
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
|
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
|
||||||
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
|
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
|
||||||
@ -1002,7 +992,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
|||||||
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
|
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
|
||||||
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
||||||
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
||||||
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o
|
src/version.o
|
||||||
|
|
||||||
ifneq ($(TRACE),)
|
ifneq ($(TRACE),)
|
||||||
OBJS += src/calltrace.o
|
OBJS += src/calltrace.o
|
||||||
@ -1123,11 +1113,6 @@ install-doc:
|
|||||||
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
|
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
|
||||||
done
|
done
|
||||||
|
|
||||||
install-admin:
|
|
||||||
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
|
||||||
$(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
|
|
||||||
$(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"
|
|
||||||
|
|
||||||
install-bin:
|
install-bin:
|
||||||
$(Q)for i in haproxy $(EXTRA); do \
|
$(Q)for i in haproxy $(EXTRA); do \
|
||||||
if ! [ -e "$$i" ]; then \
|
if ! [ -e "$$i" ]; then \
|
||||||
@ -1138,7 +1123,7 @@ install-bin:
|
|||||||
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
||||||
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
|
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
|
||||||
|
|
||||||
install: install-bin install-admin install-man install-doc
|
install: install-bin install-man install-doc
|
||||||
|
|
||||||
uninstall:
|
uninstall:
|
||||||
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
|
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
|
||||||
@ -1295,8 +1280,6 @@ unit-tests:
|
|||||||
# options for all commits within RANGE. RANGE may be either a git range
|
# options for all commits within RANGE. RANGE may be either a git range
|
||||||
# such as ref1..ref2 or a single commit, in which case all commits from
|
# such as ref1..ref2 or a single commit, in which case all commits from
|
||||||
# the master branch to this one will be tested.
|
# the master branch to this one will be tested.
|
||||||
# Will execute TEST_CMD for each commit if defined, and will stop in case of
|
|
||||||
# failure.
|
|
||||||
|
|
||||||
range:
|
range:
|
||||||
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
|
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
|
||||||
@ -1322,7 +1305,6 @@ range:
|
|||||||
echo "[ $$index/$$count ] $$commit #############################"; \
|
echo "[ $$index/$$count ] $$commit #############################"; \
|
||||||
git checkout -q $$commit || die 1; \
|
git checkout -q $$commit || die 1; \
|
||||||
$(MAKE) all || die 1; \
|
$(MAKE) all || die 1; \
|
||||||
[ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
|
|
||||||
index=$$((index + 1)); \
|
index=$$((index + 1)); \
|
||||||
done; \
|
done; \
|
||||||
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
|
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
|
||||||
|
|||||||
@ -5,8 +5,7 @@ CXX := c++
|
|||||||
CXXLIB := -lstdc++
|
CXXLIB := -lstdc++
|
||||||
|
|
||||||
ifeq ($(DEVICEATLAS_SRC),)
|
ifeq ($(DEVICEATLAS_SRC),)
|
||||||
OPTIONS_CFLAGS += -I$(DEVICEATLAS_INC)
|
OPTIONS_LDFLAGS += -lda
|
||||||
OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
|
|
||||||
else
|
else
|
||||||
DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
|
DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
|
||||||
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
|
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
|
||||||
|
|||||||
@ -389,9 +389,6 @@ listed below. Metrics from extra counters are not listed.
|
|||||||
| haproxy_server_max_connect_time_seconds |
|
| haproxy_server_max_connect_time_seconds |
|
||||||
| haproxy_server_max_response_time_seconds |
|
| haproxy_server_max_response_time_seconds |
|
||||||
| haproxy_server_max_total_time_seconds |
|
| haproxy_server_max_total_time_seconds |
|
||||||
| haproxy_server_agent_status |
|
|
||||||
| haproxy_server_agent_code |
|
|
||||||
| haproxy_server_agent_duration_seconds |
|
|
||||||
| haproxy_server_internal_errors_total |
|
| haproxy_server_internal_errors_total |
|
||||||
| haproxy_server_unsafe_idle_connections_current |
|
| haproxy_server_unsafe_idle_connections_current |
|
||||||
| haproxy_server_safe_idle_connections_current |
|
| haproxy_server_safe_idle_connections_current |
|
||||||
|
|||||||
@ -32,7 +32,7 @@
|
|||||||
|
|
||||||
/* Prometheus exporter flags (ctx->flags) */
|
/* Prometheus exporter flags (ctx->flags) */
|
||||||
#define PROMEX_FL_METRIC_HDR 0x00000001
|
#define PROMEX_FL_METRIC_HDR 0x00000001
|
||||||
#define PROMEX_FL_BODYLESS_RESP 0x00000002
|
/* unused: 0x00000002 */
|
||||||
/* unused: 0x00000004 */
|
/* unused: 0x00000004 */
|
||||||
/* unused: 0x00000008 */
|
/* unused: 0x00000008 */
|
||||||
/* unused: 0x00000010 */
|
/* unused: 0x00000010 */
|
||||||
|
|||||||
@ -173,8 +173,6 @@ const struct ist promex_st_metric_desc[ST_I_PX_MAX] = {
|
|||||||
[ST_I_PX_CTIME] = IST("Avg. connect time for last 1024 successful connections."),
|
[ST_I_PX_CTIME] = IST("Avg. connect time for last 1024 successful connections."),
|
||||||
[ST_I_PX_RTIME] = IST("Avg. response time for last 1024 successful connections."),
|
[ST_I_PX_RTIME] = IST("Avg. response time for last 1024 successful connections."),
|
||||||
[ST_I_PX_TTIME] = IST("Avg. total time for last 1024 successful connections."),
|
[ST_I_PX_TTIME] = IST("Avg. total time for last 1024 successful connections."),
|
||||||
[ST_I_PX_AGENT_STATUS] = IST("Status of last agent check, per state label value."),
|
|
||||||
[ST_I_PX_AGENT_DURATION] = IST("Total duration of the latest server agent check, in seconds."),
|
|
||||||
[ST_I_PX_QT_MAX] = IST("Maximum observed time spent in the queue"),
|
[ST_I_PX_QT_MAX] = IST("Maximum observed time spent in the queue"),
|
||||||
[ST_I_PX_CT_MAX] = IST("Maximum observed time spent waiting for a connection to complete"),
|
[ST_I_PX_CT_MAX] = IST("Maximum observed time spent waiting for a connection to complete"),
|
||||||
[ST_I_PX_RT_MAX] = IST("Maximum observed time spent waiting for a server response"),
|
[ST_I_PX_RT_MAX] = IST("Maximum observed time spent waiting for a server response"),
|
||||||
@ -242,8 +240,8 @@ void promex_register_module(struct promex_module *m)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Pools used to allocate ref on Promex modules and filters */
|
/* Pools used to allocate ref on Promex modules and filters */
|
||||||
DECLARE_STATIC_TYPED_POOL(pool_head_promex_mod_ref, "promex_module_ref", struct promex_module_ref);
|
DECLARE_STATIC_POOL(pool_head_promex_mod_ref, "promex_module_ref", sizeof(struct promex_module_ref));
|
||||||
DECLARE_STATIC_TYPED_POOL(pool_head_promex_metric_flt, "promex_metric_filter", struct promex_metric_filter);
|
DECLARE_STATIC_POOL(pool_head_promex_metric_flt, "promex_metric_filter", sizeof(struct promex_metric_filter));
|
||||||
|
|
||||||
/* Return the server status. */
|
/* Return the server status. */
|
||||||
enum promex_srv_state promex_srv_status(struct server *sv)
|
enum promex_srv_state promex_srv_status(struct server *sv)
|
||||||
@ -427,8 +425,9 @@ static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
static struct ist prefix = IST("haproxy_process_");
|
static struct ist prefix = IST("haproxy_process_");
|
||||||
struct promex_ctx *ctx = appctx->svcctx;
|
struct promex_ctx *ctx = appctx->svcctx;
|
||||||
struct field val;
|
struct field val;
|
||||||
|
struct channel *chn = sc_ic(appctx_sc(appctx));
|
||||||
struct ist name, desc, out = ist2(trash.area, 0);
|
struct ist name, desc, out = ist2(trash.area, 0);
|
||||||
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
|
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
|
|
||||||
if (!stats_fill_info(stat_line_info, ST_I_INF_MAX, 0))
|
if (!stats_fill_info(stat_line_info, ST_I_INF_MAX, 0))
|
||||||
@ -494,6 +493,7 @@ static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
|
channel_add_input(chn, out.len);
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
full:
|
full:
|
||||||
@ -510,8 +510,9 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
struct proxy *px = ctx->p[0];
|
struct proxy *px = ctx->p[0];
|
||||||
struct stats_module *mod = ctx->p[1];
|
struct stats_module *mod = ctx->p[1];
|
||||||
struct field val;
|
struct field val;
|
||||||
|
struct channel *chn = sc_ic(appctx_sc(appctx));
|
||||||
struct ist name, desc, out = ist2(trash.area, 0);
|
struct ist name, desc, out = ist2(trash.area, 0);
|
||||||
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
|
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
|
||||||
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
enum promex_front_state state;
|
enum promex_front_state state;
|
||||||
@ -691,6 +692,7 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
|
channel_add_input(chn, out.len);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
|
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
|
||||||
@ -712,8 +714,9 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
struct listener *li = ctx->p[1];
|
struct listener *li = ctx->p[1];
|
||||||
struct stats_module *mod = ctx->p[2];
|
struct stats_module *mod = ctx->p[2];
|
||||||
struct field val;
|
struct field val;
|
||||||
|
struct channel *chn = sc_ic(appctx_sc(appctx));
|
||||||
struct ist name, desc, out = ist2(trash.area, 0);
|
struct ist name, desc, out = ist2(trash.area, 0);
|
||||||
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
|
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
|
||||||
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
enum li_status status;
|
enum li_status status;
|
||||||
@ -894,6 +897,7 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
|
channel_add_input(chn, out.len);
|
||||||
}
|
}
|
||||||
/* Save pointers (0=current proxy, 1=current listener, 2=current stats module) of the current context */
|
/* Save pointers (0=current proxy, 1=current listener, 2=current stats module) of the current context */
|
||||||
ctx->p[0] = px;
|
ctx->p[0] = px;
|
||||||
@ -915,8 +919,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
struct stats_module *mod = ctx->p[1];
|
struct stats_module *mod = ctx->p[1];
|
||||||
struct server *sv;
|
struct server *sv;
|
||||||
struct field val;
|
struct field val;
|
||||||
|
struct channel *chn = sc_ic(appctx_sc(appctx));
|
||||||
struct ist name, desc, out = ist2(trash.area, 0);
|
struct ist name, desc, out = ist2(trash.area, 0);
|
||||||
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
|
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
|
||||||
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
double secs;
|
double secs;
|
||||||
@ -1178,6 +1183,7 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
|
channel_add_input(chn, out.len);
|
||||||
}
|
}
|
||||||
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
|
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
|
||||||
ctx->p[0] = px;
|
ctx->p[0] = px;
|
||||||
@ -1198,8 +1204,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
struct server *sv = ctx->p[1];
|
struct server *sv = ctx->p[1];
|
||||||
struct stats_module *mod = ctx->p[2];
|
struct stats_module *mod = ctx->p[2];
|
||||||
struct field val;
|
struct field val;
|
||||||
|
struct channel *chn = sc_ic(appctx_sc(appctx));
|
||||||
struct ist name, desc, out = ist2(trash.area, 0);
|
struct ist name, desc, out = ist2(trash.area, 0);
|
||||||
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
|
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
|
||||||
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
struct field *stats = stat_lines[STATS_DOMAIN_PROXY];
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
double secs;
|
double secs;
|
||||||
@ -1335,7 +1342,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
secs = (double)sv->check.duration / 1000.0;
|
secs = (double)sv->check.duration / 1000.0;
|
||||||
val = mkf_flt(FN_DURATION, secs);
|
val = mkf_flt(FN_DURATION, secs);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case ST_I_PX_REQ_TOT:
|
case ST_I_PX_REQ_TOT:
|
||||||
if (px->mode != PR_MODE_HTTP) {
|
if (px->mode != PR_MODE_HTTP) {
|
||||||
sv = NULL;
|
sv = NULL;
|
||||||
@ -1358,36 +1364,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
labels[lb_idx+1].value = promex_hrsp_code[ctx->field_num - ST_I_PX_HRSP_1XX];
|
labels[lb_idx+1].value = promex_hrsp_code[ctx->field_num - ST_I_PX_HRSP_1XX];
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case ST_I_PX_AGENT_STATUS:
|
|
||||||
if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
|
|
||||||
goto next_sv;
|
|
||||||
|
|
||||||
for (; ctx->obj_state < HCHK_STATUS_SIZE; ctx->obj_state++) {
|
|
||||||
if (get_check_status_result(ctx->obj_state) < CHK_RES_FAILED)
|
|
||||||
continue;
|
|
||||||
val = mkf_u32(FO_STATUS, sv->agent.status == ctx->obj_state);
|
|
||||||
check_state = get_check_status_info(ctx->obj_state);
|
|
||||||
labels[lb_idx+1].name = ist("state");
|
|
||||||
labels[lb_idx+1].value = ist(check_state);
|
|
||||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
|
||||||
type,
|
|
||||||
&val, labels, &out, max))
|
|
||||||
goto full;
|
|
||||||
}
|
|
||||||
ctx->obj_state = 0;
|
|
||||||
goto next_sv;
|
|
||||||
case ST_I_PX_AGENT_CODE:
|
|
||||||
if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
|
|
||||||
goto next_sv;
|
|
||||||
val = mkf_u32(FN_OUTPUT, (sv->agent.status < HCHK_STATUS_L57DATA) ? 0 : sv->agent.code);
|
|
||||||
break;
|
|
||||||
case ST_I_PX_AGENT_DURATION:
|
|
||||||
if (sv->agent.status < HCHK_STATUS_CHECKED)
|
|
||||||
goto next_sv;
|
|
||||||
secs = (double)sv->agent.duration / 1000.0;
|
|
||||||
val = mkf_flt(FN_DURATION, secs);
|
|
||||||
break;
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -1498,6 +1474,7 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
|
channel_add_input(chn, out.len);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Decrement server refcount if it was saved through ctx.p[1]. */
|
/* Decrement server refcount if it was saved through ctx.p[1]. */
|
||||||
@ -1593,8 +1570,9 @@ static int promex_dump_ref_modules_metrics(struct appctx *appctx, struct htx *ht
|
|||||||
{
|
{
|
||||||
struct promex_ctx *ctx = appctx->svcctx;
|
struct promex_ctx *ctx = appctx->svcctx;
|
||||||
struct promex_module_ref *ref = ctx->p[0];
|
struct promex_module_ref *ref = ctx->p[0];
|
||||||
|
struct channel *chn = sc_ic(appctx_sc(appctx));
|
||||||
struct ist out = ist2(trash.area, 0);
|
struct ist out = ist2(trash.area, 0);
|
||||||
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
|
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
|
|
||||||
if (!ref) {
|
if (!ref) {
|
||||||
@ -1618,6 +1596,7 @@ static int promex_dump_ref_modules_metrics(struct appctx *appctx, struct htx *ht
|
|||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
|
channel_add_input(chn, out.len);
|
||||||
}
|
}
|
||||||
ctx->p[0] = ref;
|
ctx->p[0] = ref;
|
||||||
return ret;
|
return ret;
|
||||||
@ -1632,8 +1611,9 @@ static int promex_dump_all_modules_metrics(struct appctx *appctx, struct htx *ht
|
|||||||
{
|
{
|
||||||
struct promex_ctx *ctx = appctx->svcctx;
|
struct promex_ctx *ctx = appctx->svcctx;
|
||||||
struct promex_module *mod = ctx->p[0];
|
struct promex_module *mod = ctx->p[0];
|
||||||
|
struct channel *chn = sc_ic(appctx_sc(appctx));
|
||||||
struct ist out = ist2(trash.area, 0);
|
struct ist out = ist2(trash.area, 0);
|
||||||
size_t max = htx_get_max_blksz(htx, applet_htx_output_room(appctx));
|
size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
|
||||||
int ret = 1;
|
int ret = 1;
|
||||||
|
|
||||||
if (!mod) {
|
if (!mod) {
|
||||||
@ -1657,6 +1637,7 @@ static int promex_dump_all_modules_metrics(struct appctx *appctx, struct htx *ht
|
|||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
|
channel_add_input(chn, out.len);
|
||||||
}
|
}
|
||||||
ctx->p[0] = mod;
|
ctx->p[0] = mod;
|
||||||
return ret;
|
return ret;
|
||||||
@ -1671,7 +1652,7 @@ static int promex_dump_all_modules_metrics(struct appctx *appctx, struct htx *ht
|
|||||||
* Uses <appctx.ctx.stats.px> as a pointer to the current proxy and <sv>/<li>
|
* Uses <appctx.ctx.stats.px> as a pointer to the current proxy and <sv>/<li>
|
||||||
* as pointers to the current server/listener respectively.
|
* as pointers to the current server/listener respectively.
|
||||||
*/
|
*/
|
||||||
static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
|
static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct htx *htx)
|
||||||
{
|
{
|
||||||
struct promex_ctx *ctx = appctx->svcctx;
|
struct promex_ctx *ctx = appctx->svcctx;
|
||||||
int ret;
|
int ret;
|
||||||
@ -1795,7 +1776,7 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
full:
|
full:
|
||||||
applet_have_more_data(appctx);
|
sc_need_room(sc, channel_htx_recv_max(sc_ic(appctx_sc(appctx)), htx) + 1);
|
||||||
return 0;
|
return 0;
|
||||||
error:
|
error:
|
||||||
/* unrecoverable error */
|
/* unrecoverable error */
|
||||||
@ -1808,11 +1789,12 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
|
|||||||
|
|
||||||
/* Parse the query string of request URI to filter the metrics. It returns 1 on
|
/* Parse the query string of request URI to filter the metrics. It returns 1 on
|
||||||
* success and -1 on error. */
|
* success and -1 on error. */
|
||||||
static int promex_parse_uri(struct appctx *appctx)
|
static int promex_parse_uri(struct appctx *appctx, struct stconn *sc)
|
||||||
{
|
{
|
||||||
struct promex_ctx *ctx = appctx->svcctx;
|
struct promex_ctx *ctx = appctx->svcctx;
|
||||||
struct buffer *outbuf;
|
struct channel *req = sc_oc(sc);
|
||||||
struct htx *req_htx;
|
struct channel *res = sc_ic(sc);
|
||||||
|
struct htx *req_htx, *res_htx;
|
||||||
struct htx_sl *sl;
|
struct htx_sl *sl;
|
||||||
char *p, *key, *value;
|
char *p, *key, *value;
|
||||||
const char *end;
|
const char *end;
|
||||||
@ -1822,13 +1804,10 @@ static int promex_parse_uri(struct appctx *appctx)
|
|||||||
int len;
|
int len;
|
||||||
|
|
||||||
/* Get the query-string */
|
/* Get the query-string */
|
||||||
req_htx = htxbuf(DISGUISE(applet_get_inbuf(appctx)));
|
req_htx = htxbuf(&req->buf);
|
||||||
sl = http_get_stline(req_htx);
|
sl = http_get_stline(req_htx);
|
||||||
if (!sl)
|
if (!sl)
|
||||||
goto bad_req_error;
|
goto error;
|
||||||
if (sl->info.req.meth == HTTP_METH_HEAD)
|
|
||||||
ctx->flags |= PROMEX_FL_BODYLESS_RESP;
|
|
||||||
|
|
||||||
p = http_find_param_list(HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl), '?');
|
p = http_find_param_list(HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl), '?');
|
||||||
if (!p)
|
if (!p)
|
||||||
goto end;
|
goto end;
|
||||||
@ -1861,27 +1840,27 @@ static int promex_parse_uri(struct appctx *appctx)
|
|||||||
*p = 0;
|
*p = 0;
|
||||||
len = url_decode(key, 1);
|
len = url_decode(key, 1);
|
||||||
if (len == -1)
|
if (len == -1)
|
||||||
goto bad_req_error;
|
goto error;
|
||||||
|
|
||||||
/* decode value */
|
/* decode value */
|
||||||
if (value) {
|
if (value) {
|
||||||
while (p < end && *p != '=' && *p != '&' && *p != '#')
|
while (p < end && *p != '=' && *p != '&' && *p != '#')
|
||||||
++p;
|
++p;
|
||||||
if (*p == '=')
|
if (*p == '=')
|
||||||
goto bad_req_error;
|
goto error;
|
||||||
if (*p == '&')
|
if (*p == '&')
|
||||||
*(p++) = 0;
|
*(p++) = 0;
|
||||||
else if (*p == '#')
|
else if (*p == '#')
|
||||||
*p = 0;
|
*p = 0;
|
||||||
len = url_decode(value, 1);
|
len = url_decode(value, 1);
|
||||||
if (len == -1)
|
if (len == -1)
|
||||||
goto bad_req_error;
|
goto error;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (strcmp(key, "scope") == 0) {
|
if (strcmp(key, "scope") == 0) {
|
||||||
default_scopes = 0; /* at least a scope defined, unset default scopes */
|
default_scopes = 0; /* at least a scope defined, unset default scopes */
|
||||||
if (!value)
|
if (!value)
|
||||||
goto bad_req_error;
|
goto error;
|
||||||
else if (*value == 0)
|
else if (*value == 0)
|
||||||
ctx->flags &= ~PROMEX_FL_SCOPE_ALL;
|
ctx->flags &= ~PROMEX_FL_SCOPE_ALL;
|
||||||
else if (*value == '*' && *(value+1) == 0)
|
else if (*value == '*' && *(value+1) == 0)
|
||||||
@ -1912,14 +1891,14 @@ static int promex_parse_uri(struct appctx *appctx)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!(ctx->flags & PROMEX_FL_SCOPE_MODULE))
|
if (!(ctx->flags & PROMEX_FL_SCOPE_MODULE))
|
||||||
goto bad_req_error;
|
goto error;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (strcmp(key, "metrics") == 0) {
|
else if (strcmp(key, "metrics") == 0) {
|
||||||
struct ist args;
|
struct ist args;
|
||||||
|
|
||||||
if (!value)
|
if (!value)
|
||||||
goto bad_req_error;
|
goto error;
|
||||||
|
|
||||||
for (args = ist(value); istlen(args); args = istadv(istfind(args, ','), 1)) {
|
for (args = ist(value); istlen(args); args = istadv(istfind(args, ','), 1)) {
|
||||||
struct eb32_node *node;
|
struct eb32_node *node;
|
||||||
@ -1970,28 +1949,30 @@ static int promex_parse_uri(struct appctx *appctx)
|
|||||||
ctx->flags |= (default_scopes | default_metrics_filter);
|
ctx->flags |= (default_scopes | default_metrics_filter);
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
bad_req_error:
|
error:
|
||||||
err = &http_err_chunks[HTTP_ERR_400];
|
err = &http_err_chunks[HTTP_ERR_400];
|
||||||
goto error;
|
channel_erase(res);
|
||||||
|
res->buf.data = b_data(err);
|
||||||
|
memcpy(res->buf.area, b_head(err), b_data(err));
|
||||||
|
res_htx = htx_from_buf(&res->buf);
|
||||||
|
channel_add_input(res, res_htx->data);
|
||||||
|
return -1;
|
||||||
|
|
||||||
internal_error:
|
internal_error:
|
||||||
err = &http_err_chunks[HTTP_ERR_500];
|
err = &http_err_chunks[HTTP_ERR_400];
|
||||||
goto error;
|
channel_erase(res);
|
||||||
|
res->buf.data = b_data(err);
|
||||||
error:
|
memcpy(res->buf.area, b_head(err), b_data(err));
|
||||||
outbuf = DISGUISE(applet_get_outbuf(appctx));
|
res_htx = htx_from_buf(&res->buf);
|
||||||
b_reset(outbuf);
|
channel_add_input(res, res_htx->data);
|
||||||
outbuf->data = b_data(err);
|
|
||||||
memcpy(outbuf->area, b_head(err), b_data(err));
|
|
||||||
applet_set_eoi(appctx);
|
|
||||||
applet_set_eos(appctx);
|
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Send HTTP headers of the response. It returns 1 on success and 0 if <htx> is
|
/* Send HTTP headers of the response. It returns 1 on success and 0 if <htx> is
|
||||||
* full. */
|
* full. */
|
||||||
static int promex_send_headers(struct appctx *appctx, struct htx *htx)
|
static int promex_send_headers(struct appctx *appctx, struct stconn *sc, struct htx *htx)
|
||||||
{
|
{
|
||||||
|
struct channel *chn = sc_ic(sc);
|
||||||
struct htx_sl *sl;
|
struct htx_sl *sl;
|
||||||
unsigned int flags;
|
unsigned int flags;
|
||||||
|
|
||||||
@ -2006,10 +1987,11 @@ static int promex_send_headers(struct appctx *appctx, struct htx *htx)
|
|||||||
!htx_add_endof(htx, HTX_BLK_EOH))
|
!htx_add_endof(htx, HTX_BLK_EOH))
|
||||||
goto full;
|
goto full;
|
||||||
|
|
||||||
|
channel_add_input(chn, htx->data);
|
||||||
return 1;
|
return 1;
|
||||||
full:
|
full:
|
||||||
htx_reset(htx);
|
htx_reset(htx);
|
||||||
applet_have_more_data(appctx);
|
sc_need_room(sc, 0);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2063,51 +2045,52 @@ static void promex_appctx_release(struct appctx *appctx)
|
|||||||
/* The main I/O handler for the promex applet. */
|
/* The main I/O handler for the promex applet. */
|
||||||
static void promex_appctx_handle_io(struct appctx *appctx)
|
static void promex_appctx_handle_io(struct appctx *appctx)
|
||||||
{
|
{
|
||||||
struct promex_ctx *ctx = appctx->svcctx;
|
struct stconn *sc = appctx_sc(appctx);
|
||||||
struct buffer *outbuf;
|
struct stream *s = __sc_strm(sc);
|
||||||
struct htx *res_htx;
|
struct channel *req = sc_oc(sc);
|
||||||
|
struct channel *res = sc_ic(sc);
|
||||||
|
struct htx *req_htx, *res_htx;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (unlikely(applet_fl_test(appctx, APPCTX_FL_EOS|APPCTX_FL_ERROR)))
|
res_htx = htx_from_buf(&res->buf);
|
||||||
|
|
||||||
|
if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
/* Check if the input buffer is available. */
|
/* Check if the input buffer is available. */
|
||||||
outbuf = applet_get_outbuf(appctx);
|
if (!b_size(&res->buf)) {
|
||||||
if (outbuf == NULL) {
|
sc_need_room(sc, 0);
|
||||||
applet_have_more_data(appctx);
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
res_htx = htx_from_buf(outbuf);
|
|
||||||
|
|
||||||
switch (appctx->st0) {
|
switch (appctx->st0) {
|
||||||
case PROMEX_ST_INIT:
|
case PROMEX_ST_INIT:
|
||||||
if (!applet_get_inbuf(appctx) || !applet_htx_input_data(appctx)) {
|
if (!co_data(req)) {
|
||||||
applet_need_more_data(appctx);
|
applet_need_more_data(appctx);
|
||||||
break;
|
goto out;
|
||||||
}
|
}
|
||||||
|
ret = promex_parse_uri(appctx, sc);
|
||||||
ret = promex_parse_uri(appctx);
|
|
||||||
if (ret <= 0) {
|
if (ret <= 0) {
|
||||||
if (ret == -1)
|
if (ret == -1)
|
||||||
applet_set_error(appctx);
|
goto error;
|
||||||
break;
|
goto out;
|
||||||
}
|
}
|
||||||
appctx->st0 = PROMEX_ST_HEAD;
|
appctx->st0 = PROMEX_ST_HEAD;
|
||||||
appctx->st1 = PROMEX_DUMPER_INIT;
|
appctx->st1 = PROMEX_DUMPER_INIT;
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
|
|
||||||
case PROMEX_ST_HEAD:
|
case PROMEX_ST_HEAD:
|
||||||
if (!promex_send_headers(appctx, res_htx))
|
if (!promex_send_headers(appctx, sc, res_htx))
|
||||||
break;
|
goto out;
|
||||||
appctx->st0 = ((ctx->flags & PROMEX_FL_BODYLESS_RESP) ? PROMEX_ST_DONE : PROMEX_ST_DUMP);
|
appctx->st0 = ((s->txn->meth == HTTP_METH_HEAD) ? PROMEX_ST_DONE : PROMEX_ST_DUMP);
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
|
|
||||||
case PROMEX_ST_DUMP:
|
case PROMEX_ST_DUMP:
|
||||||
ret = promex_dump_metrics(appctx, res_htx);
|
ret = promex_dump_metrics(appctx, sc, res_htx);
|
||||||
if (ret <= 0) {
|
if (ret <= 0) {
|
||||||
if (ret == -1)
|
if (ret == -1)
|
||||||
applet_set_error(appctx);
|
goto error;
|
||||||
break;
|
goto out;
|
||||||
}
|
}
|
||||||
appctx->st0 = PROMEX_ST_DONE;
|
appctx->st0 = PROMEX_ST_DONE;
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
@ -2121,36 +2104,41 @@ static void promex_appctx_handle_io(struct appctx *appctx)
|
|||||||
*/
|
*/
|
||||||
if (htx_is_empty(res_htx)) {
|
if (htx_is_empty(res_htx)) {
|
||||||
if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
|
if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
|
||||||
applet_have_more_data(appctx);
|
sc_need_room(sc, sizeof(struct htx_blk) + 1);
|
||||||
break;
|
goto out;
|
||||||
}
|
}
|
||||||
|
channel_add_input(res, 1);
|
||||||
}
|
}
|
||||||
res_htx->flags |= HTX_FL_EOM;
|
res_htx->flags |= HTX_FL_EOM;
|
||||||
applet_set_eoi(appctx);
|
se_fl_set(appctx->sedesc, SE_FL_EOI);
|
||||||
appctx->st0 = PROMEX_ST_END;
|
appctx->st0 = PROMEX_ST_END;
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
|
|
||||||
case PROMEX_ST_END:
|
case PROMEX_ST_END:
|
||||||
applet_set_eos(appctx);
|
se_fl_set(appctx->sedesc, SE_FL_EOS);
|
||||||
}
|
}
|
||||||
|
|
||||||
htx_to_buf(res_htx, outbuf);
|
|
||||||
|
|
||||||
out:
|
out:
|
||||||
|
htx_to_buf(res_htx, &res->buf);
|
||||||
|
|
||||||
/* eat the whole request */
|
/* eat the whole request */
|
||||||
applet_reset_input(appctx);
|
if (co_data(req)) {
|
||||||
|
req_htx = htx_from_buf(&req->buf);
|
||||||
|
co_htx_skip(req, req_htx, co_data(req));
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
error:
|
||||||
|
se_fl_set(appctx->sedesc, SE_FL_ERROR);
|
||||||
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct applet promex_applet = {
|
struct applet promex_applet = {
|
||||||
.obj_type = OBJ_TYPE_APPLET,
|
.obj_type = OBJ_TYPE_APPLET,
|
||||||
.flags = APPLET_FL_NEW_API|APPLET_FL_HTX,
|
|
||||||
.name = "<PROMEX>", /* used for logging */
|
.name = "<PROMEX>", /* used for logging */
|
||||||
.init = promex_appctx_init,
|
.init = promex_appctx_init,
|
||||||
.release = promex_appctx_release,
|
.release = promex_appctx_release,
|
||||||
.fct = promex_appctx_handle_io,
|
.fct = promex_appctx_handle_io,
|
||||||
.rcv_buf = appctx_htx_rcv_buf,
|
|
||||||
.snd_buf = appctx_htx_snd_buf,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static enum act_parse_ret service_parse_prometheus_exporter(const char **args, int *cur_arg, struct proxy *px,
|
static enum act_parse_ret service_parse_prometheus_exporter(const char **args, int *cur_arg, struct proxy *px,
|
||||||
|
|||||||
@ -1,235 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Dump certificates from the HAProxy stats or master socket to the filesystem
|
|
||||||
# Experimental script
|
|
||||||
#
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export BASEPATH=${BASEPATH:-/etc/haproxy}/
|
|
||||||
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
|
|
||||||
export DRY_RUN=0
|
|
||||||
export DEBUG=
|
|
||||||
export VERBOSE=
|
|
||||||
export M="@1 "
|
|
||||||
export TMP
|
|
||||||
|
|
||||||
vecho() {
|
|
||||||
|
|
||||||
[ -n "$VERBOSE" ] && echo "$@"
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
read_certificate() {
|
|
||||||
name=$1
|
|
||||||
crt_filename=
|
|
||||||
key_filename=
|
|
||||||
|
|
||||||
OFS=$IFS
|
|
||||||
IFS=":"
|
|
||||||
|
|
||||||
while read -r key value; do
|
|
||||||
case "$key" in
|
|
||||||
"Crt filename")
|
|
||||||
crt_filename="${value# }"
|
|
||||||
key_filename="${value# }"
|
|
||||||
;;
|
|
||||||
"Key filename")
|
|
||||||
key_filename="${value# }"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
|
|
||||||
IFS=$OFS
|
|
||||||
|
|
||||||
if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# handle fields without a crt-base/key-base
|
|
||||||
[ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
|
|
||||||
[ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"
|
|
||||||
|
|
||||||
vecho "name:$name"
|
|
||||||
vecho "crt:$crt_filename"
|
|
||||||
vecho "key:$key_filename"
|
|
||||||
|
|
||||||
export NAME="$name"
|
|
||||||
export CRT_FILENAME="$crt_filename"
|
|
||||||
export KEY_FILENAME="$key_filename"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
cmp_certkey() {
|
|
||||||
prev=$1
|
|
||||||
new=$2
|
|
||||||
|
|
||||||
if [ ! -f "$prev" ]; then
|
|
||||||
return 1;
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
dump_certificate() {
|
|
||||||
name=$1
|
|
||||||
prev_crt=$2
|
|
||||||
prev_key=$3
|
|
||||||
r="tmp.${RANDOM}"
|
|
||||||
d="old.$(date +%s)"
|
|
||||||
new_crt="$TMP/$(basename "$prev_crt").${r}"
|
|
||||||
new_key="$TMP/$(basename "$prev_key").${r}"
|
|
||||||
|
|
||||||
if ! touch "${new_crt}" || ! touch "${new_key}"; then
|
|
||||||
echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
|
|
||||||
# use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
|
|
||||||
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"
|
|
||||||
|
|
||||||
if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
|
|
||||||
echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if cmp_certkey "${prev_crt}" "${new_crt}"; then
|
|
||||||
echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# dry run will just return before trying to move the files
|
|
||||||
if [ "${DRY_RUN}" != "0" ]; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# move the current certificates to ".old.timestamp"
|
|
||||||
if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
|
|
||||||
mv "${prev_crt}" "${prev_crt}.${d}"
|
|
||||||
[ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# move the new certificates to old place
|
|
||||||
mv "${new_crt}" "${prev_crt}"
|
|
||||||
[ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
dump_all_certificates() {
|
|
||||||
echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
|
|
||||||
export NAME
|
|
||||||
export CRT_FILENAME
|
|
||||||
export KEY_FILENAME
|
|
||||||
|
|
||||||
if read_certificate "$line"; then
|
|
||||||
dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
|
|
||||||
else
|
|
||||||
echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
|
|
||||||
fi
|
|
||||||
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
echo "Usage:"
|
|
||||||
echo " $0 [options]* [cert]*"
|
|
||||||
echo ""
|
|
||||||
echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
|
|
||||||
echo " Require socat and openssl"
|
|
||||||
echo " EXPERIMENTAL script, backup your files!"
|
|
||||||
echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
|
|
||||||
echo " -s, --socket <path> Use the stats socket at <path>"
|
|
||||||
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
|
|
||||||
echo " -n, --dry-run Read certificates on the socket but don't dump them"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -v, --verbose Verbose mode"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo " -- End of options"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
|
|
||||||
}
|
|
||||||
|
|
||||||
main() {
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
case "$1" in
|
|
||||||
-S|--master-socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M="@1 "
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-s|--socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M=
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-p|--path)
|
|
||||||
BASEPATH="$2/"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-n|--dry-run)
|
|
||||||
DRY_RUN=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-d|--debug)
|
|
||||||
DEBUG=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-v|--verbose)
|
|
||||||
VERBOSE=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-h|--help)
|
|
||||||
usage "$@"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
--)
|
|
||||||
shift
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
-*)
|
|
||||||
echo "[ALERT] ($$) : Unknown option '$1'" >&2
|
|
||||||
usage "$@"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -n "$DEBUG" ]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
|
|
||||||
TMP=${TMP:-$(mktemp -d)}
|
|
||||||
|
|
||||||
if [ -z "$1" ]; then
|
|
||||||
dump_all_certificates
|
|
||||||
else
|
|
||||||
# compute the certificates names at the end of the command
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
if ! read_certificate "$1"; then
|
|
||||||
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
|
|
||||||
shift
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
trap 'rm -rf -- "$TMP"' EXIT
|
|
||||||
main "$@"
|
|
||||||
@ -1,113 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export VERBOSE=1
|
|
||||||
export TIMEOUT=90
|
|
||||||
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
|
|
||||||
export RET=
|
|
||||||
|
|
||||||
alert() {
|
|
||||||
if [ "$VERBOSE" -ge "1" ]; then
|
|
||||||
echo "[ALERT] $*" >&2
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
reload() {
|
|
||||||
while read -r line; do
|
|
||||||
|
|
||||||
if [ "$line" = "Success=0" ]; then
|
|
||||||
RET=1
|
|
||||||
elif [ "$line" = "Success=1" ]; then
|
|
||||||
RET=0
|
|
||||||
elif [ "$line" = "Another reload is still in progress." ]; then
|
|
||||||
alert "$line"
|
|
||||||
elif [ "$line" = "--" ]; then
|
|
||||||
continue;
|
|
||||||
else
|
|
||||||
if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
|
|
||||||
echo "$line" >&2
|
|
||||||
elif [ "$VERBOSE" = "3" ]; then
|
|
||||||
echo "$line" >&2
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
|
|
||||||
|
|
||||||
if [ -z "$RET" ]; then
|
|
||||||
alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
return "$RET"
|
|
||||||
}
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
echo "Usage:"
|
|
||||||
echo " $0 [options]*"
|
|
||||||
echo ""
|
|
||||||
echo " Trigger a reload from the master socket"
|
|
||||||
echo " Require socat"
|
|
||||||
echo " EXPERIMENTAL script!"
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
|
|
||||||
echo " -s, --silent Silent mode (no output)"
|
|
||||||
echo " -v, --verbose Verbose output (output from haproxy on failure)"
|
|
||||||
echo " -vv Even more verbose output (output from haproxy on success and failure)"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -S ${MASTER_SOCKET} -d ${TIMEOUT}"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
main() {
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
case "$1" in
|
|
||||||
-S|--master-socket)
|
|
||||||
MASTER_SOCKET="$2"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-t|--timeout)
|
|
||||||
TIMEOUT="$2"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-s|--silent)
|
|
||||||
VERBOSE=0
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-v|--verbose)
|
|
||||||
VERBOSE=2
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-vv|--verbose)
|
|
||||||
VERBOSE=3
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-d|--debug)
|
|
||||||
DEBUG=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-h|--help)
|
|
||||||
usage "$@"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "[ALERT] ($$) : Unknown option '$1'" >&2
|
|
||||||
usage "$@"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -n "$DEBUG" ]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
main "$@"
|
|
||||||
reload
|
|
||||||
@ -123,22 +123,6 @@ struct url_stat {
|
|||||||
#define FILT2_PRESERVE_QUERY 0x02
|
#define FILT2_PRESERVE_QUERY 0x02
|
||||||
#define FILT2_EXTRACT_CAPTURE 0x04
|
#define FILT2_EXTRACT_CAPTURE 0x04
|
||||||
|
|
||||||
#define FILT_OUTPUT_FMT (FILT_COUNT_ONLY| \
|
|
||||||
FILT_COUNT_STATUS| \
|
|
||||||
FILT_COUNT_SRV_STATUS| \
|
|
||||||
FILT_COUNT_COOK_CODES| \
|
|
||||||
FILT_COUNT_TERM_CODES| \
|
|
||||||
FILT_COUNT_URL_ONLY| \
|
|
||||||
FILT_COUNT_URL_COUNT| \
|
|
||||||
FILT_COUNT_URL_ERR| \
|
|
||||||
FILT_COUNT_URL_TAVG| \
|
|
||||||
FILT_COUNT_URL_TTOT| \
|
|
||||||
FILT_COUNT_URL_TAVGO| \
|
|
||||||
FILT_COUNT_URL_TTOTO| \
|
|
||||||
FILT_COUNT_URL_BAVG| \
|
|
||||||
FILT_COUNT_URL_BTOT| \
|
|
||||||
FILT_COUNT_IP_COUNT)
|
|
||||||
|
|
||||||
unsigned int filter = 0;
|
unsigned int filter = 0;
|
||||||
unsigned int filter2 = 0;
|
unsigned int filter2 = 0;
|
||||||
unsigned int filter_invert = 0;
|
unsigned int filter_invert = 0;
|
||||||
@ -208,7 +192,7 @@ void help()
|
|||||||
" you can also use -n to start from earlier then field %d\n"
|
" you can also use -n to start from earlier then field %d\n"
|
||||||
" -query preserve the query string for per-URL (-u*) statistics\n"
|
" -query preserve the query string for per-URL (-u*) statistics\n"
|
||||||
"\n"
|
"\n"
|
||||||
"Output format - **only one** may be used at a time\n"
|
"Output format - only one may be used at a time\n"
|
||||||
" -c only report the number of lines that would have been printed\n"
|
" -c only report the number of lines that would have been printed\n"
|
||||||
" -pct output connect and response times percentiles\n"
|
" -pct output connect and response times percentiles\n"
|
||||||
" -st output number of requests per HTTP status code\n"
|
" -st output number of requests per HTTP status code\n"
|
||||||
@ -914,9 +898,6 @@ int main(int argc, char **argv)
|
|||||||
if (!filter && !filter2)
|
if (!filter && !filter2)
|
||||||
die("No action specified.\n");
|
die("No action specified.\n");
|
||||||
|
|
||||||
if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
|
|
||||||
die("Please, set only one output filter.\n");
|
|
||||||
|
|
||||||
if (filter & FILT_ACC_COUNT && !filter_acc_count)
|
if (filter & FILT_ACC_COUNT && !filter_acc_count)
|
||||||
filter_acc_count=1;
|
filter_acc_count=1;
|
||||||
|
|
||||||
@ -1571,10 +1552,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
|
|||||||
if (!srv_node) {
|
if (!srv_node) {
|
||||||
/* server not yet in the tree, let's create it */
|
/* server not yet in the tree, let's create it */
|
||||||
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
||||||
if (unlikely(!srv)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
srv_node = &srv->node;
|
srv_node = &srv->node;
|
||||||
memcpy(&srv_node->key, b, e - b);
|
memcpy(&srv_node->key, b, e - b);
|
||||||
srv_node->key[e - b] = '\0';
|
srv_node->key[e - b] = '\0';
|
||||||
@ -1684,10 +1661,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
|
|||||||
*/
|
*/
|
||||||
if (unlikely(!ustat))
|
if (unlikely(!ustat))
|
||||||
ustat = calloc(1, sizeof(*ustat));
|
ustat = calloc(1, sizeof(*ustat));
|
||||||
if (unlikely(!ustat)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
ustat->nb_err = err;
|
ustat->nb_err = err;
|
||||||
ustat->nb_req = 1;
|
ustat->nb_req = 1;
|
||||||
|
|||||||
@ -6,9 +6,9 @@ Wants=network-online.target
|
|||||||
[Service]
|
[Service]
|
||||||
EnvironmentFile=-/etc/default/haproxy
|
EnvironmentFile=-/etc/default/haproxy
|
||||||
EnvironmentFile=-/etc/sysconfig/haproxy
|
EnvironmentFile=-/etc/sysconfig/haproxy
|
||||||
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
||||||
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
|
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
|
||||||
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
|
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
|
||||||
ExecReload=/bin/kill -USR2 $MAINPID
|
ExecReload=/bin/kill -USR2 $MAINPID
|
||||||
KillMode=mixed
|
KillMode=mixed
|
||||||
Restart=always
|
Restart=always
|
||||||
|
|||||||
@ -1,19 +0,0 @@
|
|||||||
# show non-null memprofile entries with method, alloc/free counts/tot and caller
|
|
||||||
|
|
||||||
define memprof_dump
|
|
||||||
set $i = 0
|
|
||||||
set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
|
|
||||||
while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
|
|
||||||
if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
|
|
||||||
set $m = memprof_stats[$i].method
|
|
||||||
printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
|
|
||||||
memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
|
|
||||||
memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
|
|
||||||
output/a memprof_stats[$i].caller
|
|
||||||
printf "\n"
|
|
||||||
end
|
|
||||||
set $i = $i + 1
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
||||||
@ -59,9 +59,9 @@ struct ring_v2 {
|
|||||||
struct ring_v2a {
|
struct ring_v2a {
|
||||||
size_t size; // storage size
|
size_t size; // storage size
|
||||||
size_t rsvd; // header length (used for file-backed maps)
|
size_t rsvd; // header length (used for file-backed maps)
|
||||||
size_t tail ALIGNED(64); // storage tail
|
size_t tail __attribute__((aligned(64))); // storage tail
|
||||||
size_t head ALIGNED(64); // storage head
|
size_t head __attribute__((aligned(64))); // storage head
|
||||||
char area[0] ALIGNED(64); // storage area begins immediately here
|
char area[0] __attribute__((aligned(64))); // storage area begins immediately here
|
||||||
};
|
};
|
||||||
|
|
||||||
/* display the message and exit with the code */
|
/* display the message and exit with the code */
|
||||||
|
|||||||
@ -1,70 +0,0 @@
|
|||||||
BEGININPUT
|
|
||||||
BEGINCONTEXT
|
|
||||||
|
|
||||||
HAProxy's development cycle consists in one development branch, and multiple
|
|
||||||
maintenance branches.
|
|
||||||
|
|
||||||
All the development is made into the development branch exclusively. This
|
|
||||||
includes mostly new features, doc updates, cleanups and or course, fixes.
|
|
||||||
|
|
||||||
The maintenance branches, also called stable branches, never see any
|
|
||||||
development, and only receive ultra-safe fixes for bugs that affect them,
|
|
||||||
that are picked from the development branch.
|
|
||||||
|
|
||||||
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
|
|
||||||
release, the development branch enters maintenance and a new development branch
|
|
||||||
is created with a new, higher version. The current development branch is
|
|
||||||
3.3-dev, and maintenance branches are 3.2 and below.
|
|
||||||
|
|
||||||
Fixes created in the development branch for issues that were introduced in an
|
|
||||||
earlier branch are applied in descending order to each and every version till
|
|
||||||
that branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
|
|
||||||
and so on. This operation is called "backporting". A fix for an issue is never
|
|
||||||
backported beyond the branch that introduced the issue. An important point is
|
|
||||||
that the project maintainers really aim at zero regression in maintenance
|
|
||||||
branches, so they're never willing to take any risk backporting patches that
|
|
||||||
are not deemed strictly necessary.
|
|
||||||
|
|
||||||
Fixes consist of patches managed using the Git version control tool and are
|
|
||||||
identified by a Git commit ID and a commit message. For this reason we
|
|
||||||
indistinctly talk about backporting fixes, commits, or patches; all mean the
|
|
||||||
same thing. When mentioning commit IDs, developers always use a short form
|
|
||||||
made of the first 8 characters only, and expect the AI assistant to do the
|
|
||||||
same.
|
|
||||||
|
|
||||||
It seldom happens that some fixes depend on changes that were brought by other
|
|
||||||
patches that were not in some branches and that will need to be backported as
|
|
||||||
well for the fix to work. In this case, such information is explicitly provided
|
|
||||||
in the commit message by the patch's author in natural language.
|
|
||||||
|
|
||||||
Developers are serious and always indicate if a patch needs to be backported.
|
|
||||||
Sometimes they omit the exact target branch, or they will say that the patch is
|
|
||||||
"needed" in some older branch, but it means the same. If a commit message
|
|
||||||
doesn't mention any backport instructions, it means that the commit does not
|
|
||||||
have to be backported. And patches that are not strictly bug fixes nor doc
|
|
||||||
improvements are normally not backported. For example, fixes for design
|
|
||||||
limitations, architectural improvements and performance optimizations are
|
|
||||||
considered too risky for a backport. Finally, all bug fixes are tagged as
|
|
||||||
"BUG" at the beginning of their subject line. Patches that are not tagged as
|
|
||||||
such are not bugs, and must never be backported unless their commit message
|
|
||||||
explicitly requests so.
|
|
||||||
|
|
||||||
ENDCONTEXT
|
|
||||||
|
|
||||||
A developer is reviewing the development branch, trying to spot which commits
|
|
||||||
need to be backported to maintenance branches. This person is already expert
|
|
||||||
on HAProxy and everything related to Git, patch management, and the risks
|
|
||||||
associated with backports, so he doesn't want to be told how to proceed nor to
|
|
||||||
review the contents of the patch.
|
|
||||||
|
|
||||||
The goal for this developer is to get some help from the AI assistant to save
|
|
||||||
some precious time on this tedious review work. In order to do a better job, he
|
|
||||||
needs an accurate summary of the information and instructions found in each
|
|
||||||
commit message. Specifically he needs to figure if the patch fixes a problem
|
|
||||||
affecting an older branch or not, if it needs to be backported, if so to which
|
|
||||||
branches, and if other patches need to be backported along with it.
|
|
||||||
|
|
||||||
The indented text block below after an "id" line and starting with a Subject line
|
|
||||||
is a commit message from the HAProxy development branch that describes a patch
|
|
||||||
applied to that branch, starting with its subject line, please read it carefully.
|
|
||||||
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
|
|
||||||
ENDINPUT
|
|
||||||
BEGININSTRUCTION
|
|
||||||
|
|
||||||
You are an AI assistant that follows instruction extremely well. Help as much
|
|
||||||
as you can, responding to a single question using a single response.
|
|
||||||
|
|
||||||
The developer wants to know if he needs to backport the patch above to fix
|
|
||||||
maintenance branches, for which branches, and what possible dependencies might
|
|
||||||
be mentioned in the commit message. Carefully study the commit message and its
|
|
||||||
backporting instructions if any (otherwise it should probably not be backported),
|
|
||||||
then provide a very concise and short summary that will help the developer decide
|
|
||||||
to backport it, or simply to skip it.
|
|
||||||
|
|
||||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
|
||||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
|
||||||
where X is a single word among:
|
|
||||||
- "yes", if you recommend to backport the patch right now either because
|
|
||||||
it explicitly states this or because it's a fix for a bug that affects
|
|
||||||
a maintenance branch (3.2 or lower);
|
|
||||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
|
||||||
only after waiting some time.
|
|
||||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
|
||||||
lack of explicit backport instructions, or it's just an improvement);
|
|
||||||
- "uncertain" otherwise for cases not covered above
|
|
||||||
|
|
||||||
ENDINSTRUCTION
|
|
||||||
|
|
||||||
Explanation:
|
|
||||||
@ -1,70 +0,0 @@
|
|||||||
BEGININPUT
|
|
||||||
BEGINCONTEXT
|
|
||||||
|
|
||||||
HAProxy's development cycle consists in one development branch, and multiple
|
|
||||||
maintenance branches.
|
|
||||||
|
|
||||||
All the development is made into the development branch exclusively. This
|
|
||||||
includes mostly new features, doc updates, cleanups and or course, fixes.
|
|
||||||
|
|
||||||
The maintenance branches, also called stable branches, never see any
|
|
||||||
development, and only receive ultra-safe fixes for bugs that affect them,
|
|
||||||
that are picked from the development branch.
|
|
||||||
|
|
||||||
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
|
|
||||||
release, the development branch enters maintenance and a new development branch
|
|
||||||
is created with a new, higher version. The current development branch is
|
|
||||||
3.4-dev, and maintenance branches are 3.3 and below.
|
|
||||||
|
|
||||||
Fixes created in the development branch for issues that were introduced in an
|
|
||||||
earlier branch are applied in descending order to each and every version till
|
|
||||||
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
|
|
||||||
and so on. This operation is called "backporting". A fix for an issue is never
|
|
||||||
backported beyond the branch that introduced the issue. An important point is
|
|
||||||
that the project maintainers really aim at zero regression in maintenance
|
|
||||||
branches, so they're never willing to take any risk backporting patches that
|
|
||||||
are not deemed strictly necessary.
|
|
||||||
|
|
||||||
Fixes consist of patches managed using the Git version control tool and are
|
|
||||||
identified by a Git commit ID and a commit message. For this reason we
|
|
||||||
indistinctly talk about backporting fixes, commits, or patches; all mean the
|
|
||||||
same thing. When mentioning commit IDs, developers always use a short form
|
|
||||||
made of the first 8 characters only, and expect the AI assistant to do the
|
|
||||||
same.
|
|
||||||
|
|
||||||
It seldom happens that some fixes depend on changes that were brought by other
|
|
||||||
patches that were not in some branches and that will need to be backported as
|
|
||||||
well for the fix to work. In this case, such information is explicitly provided
|
|
||||||
in the commit message by the patch's author in natural language.
|
|
||||||
|
|
||||||
Developers are serious and always indicate if a patch needs to be backported.
|
|
||||||
Sometimes they omit the exact target branch, or they will say that the patch is
|
|
||||||
"needed" in some older branch, but it means the same. If a commit message
|
|
||||||
doesn't mention any backport instructions, it means that the commit does not
|
|
||||||
have to be backported. And patches that are not strictly bug fixes nor doc
|
|
||||||
improvements are normally not backported. For example, fixes for design
|
|
||||||
limitations, architectural improvements and performance optimizations are
|
|
||||||
considered too risky for a backport. Finally, all bug fixes are tagged as
|
|
||||||
"BUG" at the beginning of their subject line. Patches that are not tagged as
|
|
||||||
such are not bugs, and must never be backported unless their commit message
|
|
||||||
explicitly requests so.
|
|
||||||
|
|
||||||
ENDCONTEXT
|
|
||||||
|
|
||||||
A developer is reviewing the development branch, trying to spot which commits
|
|
||||||
need to be backported to maintenance branches. This person is already expert
|
|
||||||
on HAProxy and everything related to Git, patch management, and the risks
|
|
||||||
associated with backports, so he doesn't want to be told how to proceed nor to
|
|
||||||
review the contents of the patch.
|
|
||||||
|
|
||||||
The goal for this developer is to get some help from the AI assistant to save
|
|
||||||
some precious time on this tedious review work. In order to do a better job, he
|
|
||||||
needs an accurate summary of the information and instructions found in each
|
|
||||||
commit message. Specifically he needs to figure if the patch fixes a problem
|
|
||||||
affecting an older branch or not, if it needs to be backported, if so to which
|
|
||||||
branches, and if other patches need to be backported along with it.
|
|
||||||
|
|
||||||
The indented text block below after an "id" line and starting with a Subject line
|
|
||||||
is a commit message from the HAProxy development branch that describes a patch
|
|
||||||
applied to that branch, starting with its subject line, please read it carefully.
|
|
||||||
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
|
|
||||||
ENDINPUT
|
|
||||||
BEGININSTRUCTION
|
|
||||||
|
|
||||||
You are an AI assistant that follows instruction extremely well. Help as much
|
|
||||||
as you can, responding to a single question using a single response.
|
|
||||||
|
|
||||||
The developer wants to know if he needs to backport the patch above to fix
|
|
||||||
maintenance branches, for which branches, and what possible dependencies might
|
|
||||||
be mentioned in the commit message. Carefully study the commit message and its
|
|
||||||
backporting instructions if any (otherwise it should probably not be backported),
|
|
||||||
then provide a very concise and short summary that will help the developer decide
|
|
||||||
to backport it, or simply to skip it.
|
|
||||||
|
|
||||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
|
||||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
|
||||||
where X is a single word among:
|
|
||||||
- "yes", if you recommend to backport the patch right now either because
|
|
||||||
it explicitly states this or because it's a fix for a bug that affects
|
|
||||||
a maintenance branch (3.3 or lower);
|
|
||||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
|
||||||
only after waiting some time.
|
|
||||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
|
||||||
lack of explicit backport instructions, or it's just an improvement);
|
|
||||||
- "uncertain" otherwise for cases not covered above
|
|
||||||
|
|
||||||
ENDINSTRUCTION
|
|
||||||
|
|
||||||
Explanation:
|
|
||||||
@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2
|
|||||||
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
|
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
|
||||||
|
|
||||||
(cd "$HAPROXY_DIR"
|
(cd "$HAPROXY_DIR"
|
||||||
# avoid git pull, it chokes on forced push
|
git pull
|
||||||
git remote update origin; git reset origin/master;git checkout -f
|
|
||||||
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
|
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
|
||||||
if [ -n "$last_file" ]; then
|
if [ -n "$last_file" ]; then
|
||||||
restart=$(head -n1 "$last_file" | cut -f2 -d' ')
|
restart=$(head -n1 "$last_file" | cut -f2 -d' ')
|
||||||
|
|||||||
BIN
dev/phash/a.out
Executable file
BIN
dev/phash/a.out
Executable file
Binary file not shown.
@ -3,9 +3,7 @@ DeviceAtlas Device Detection
|
|||||||
|
|
||||||
In order to add DeviceAtlas Device Detection support, you would need to download
|
In order to add DeviceAtlas Device Detection support, you would need to download
|
||||||
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
|
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
|
||||||
Once extracted, two modes are supported :
|
Once extracted :
|
||||||
|
|
||||||
1/ Build HAProxy and DeviceAtlas in one command
|
|
||||||
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
|
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
|
||||||
|
|
||||||
@ -16,6 +14,10 @@ directory. Also, in the case the api cache support is not needed and/or a C++ to
|
|||||||
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
|
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
|
||||||
|
|
||||||
|
However, if the API had been installed beforehand, DEVICEATLAS_SRC
|
||||||
|
can be omitted. Note that the DeviceAtlas C API version supported is from the 3.x
|
||||||
|
releases series (3.2.1 minimum recommended).
|
||||||
|
|
||||||
For HAProxy developers who need to verify that their changes didn't accidentally
|
For HAProxy developers who need to verify that their changes didn't accidentally
|
||||||
break the DeviceAtlas code, it is possible to build a dummy library provided in
|
break the DeviceAtlas code, it is possible to build a dummy library provided in
|
||||||
the addons/deviceatlas/dummy directory and to use it as an alternative for the
|
the addons/deviceatlas/dummy directory and to use it as an alternative for the
|
||||||
@ -25,29 +27,6 @@ validate API changes :
|
|||||||
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
|
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
|
||||||
|
|
||||||
2/ Build and install DeviceAtlas according to https://docs.deviceatlas.com/apis/enterprise/c/<release version>/README.html
|
|
||||||
|
|
||||||
For example :
|
|
||||||
In the deviceatlas library folder :
|
|
||||||
$ cmake .
|
|
||||||
$ make
|
|
||||||
$ sudo make install
|
|
||||||
|
|
||||||
In the HAProxy folder :
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1
|
|
||||||
|
|
||||||
Note that if the -DCMAKE_INSTALL_PREFIX cmake option had been used, it is necessary to set as well DEVICEATLAS_LIB and
|
|
||||||
DEVICEATLAS_INC as follow :
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=<CMAKE_INSTALL_PREFIX value>/include DEVICEATLAS_LIB=<CMAKE_INSTALL_PREFIX value>/lib
|
|
||||||
|
|
||||||
For example :
|
|
||||||
$ cmake -DCMAKE_INSTALL_PREFIX=/opt/local
|
|
||||||
$ make
|
|
||||||
$ sudo make install
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=/opt/local/include DEVICEATLAS_LIB=/opt/local/lib
|
|
||||||
|
|
||||||
Note that DEVICEATLAS_SRC is omitted in this case.
|
|
||||||
|
|
||||||
These are supported DeviceAtlas directives (see doc/configuration.txt) :
|
These are supported DeviceAtlas directives (see doc/configuration.txt) :
|
||||||
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
|
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
|
||||||
- deviceatlas-log-level <number> (0 to 3, level of information returned by
|
- deviceatlas-log-level <number> (0 to 3, level of information returned by
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -1,86 +0,0 @@
|
|||||||
2025-08-13 - Memory allocation in HAProxy 3.3
|
|
||||||
|
|
||||||
The vast majority of dynamic memory allocations are performed from pools. Pools
|
|
||||||
are optimized to store pre-calibrated objects of the right size for a given
|
|
||||||
usage, try to favor locality and hot objects as much as possible, and are
|
|
||||||
heavily instrumented to detect and help debug a wide class of bugs including
|
|
||||||
buffer overflows, use-after-free, etc.
|
|
||||||
|
|
||||||
For objects of random sizes, or those used only at configuration time, pools
|
|
||||||
are not suited, and the regular malloc/free family is available, in addition of
|
|
||||||
a few others.
|
|
||||||
|
|
||||||
The standard allocation calls are intercepted at the code level (#define) when
|
|
||||||
the code is compiled with -DDEBUG_MEM_STATS. For this reason, these calls are
|
|
||||||
redefined as macros in "bug.h", and one must not try to use the pointers to
|
|
||||||
such functions, as this may break DEBUG_MEM_STATS. This provides fine-grained
|
|
||||||
stats about allocation/free per line of source code using locally implemented
|
|
||||||
counters that can be consulted by "debug dev memstats". The calls are
|
|
||||||
categorized into one of "calloc", "free", "malloc", "realloc", "strdup",
|
|
||||||
"p_alloc", "p_free", the latter two designating pools. Extra calls such as
|
|
||||||
memalign() and similar are also intercepted and counted as malloc.
|
|
||||||
|
|
||||||
Due to the nature of this replacement, DEBUG_MEM_STATS cannot see operations
|
|
||||||
performed in libraries or dependencies.
|
|
||||||
|
|
||||||
In addition to DEBUG_MEM_STATS, when haproxy is built with USE_MEMORY_PROFILING
|
|
||||||
the standard functions are wrapped by new ones defined in "activity.c", which
|
|
||||||
also hold counters by call place. These ones are able to trace activity in
|
|
||||||
libraries because the functions check the return pointer to figure where the
|
|
||||||
call was made. The approach is different and relies on a large hash table. The
|
|
||||||
files, function names and line numbers are not know, but by passing the pointer
|
|
||||||
to dladdr(), we can often resolve most of these symbols. These operations are
|
|
||||||
consulted via "show profiling memory". It must first be enabled either in the
|
|
||||||
global config "profiling.memory on" or the CLI using "set profiling memory on".
|
|
||||||
Memory profiling can also track pool allocations and frees thanks to knowing
|
|
||||||
the size of the element and knowing a place where to store it. Some future
|
|
||||||
evolutions might consider making this possible as well for pure malloc/free
|
|
||||||
too by leveraging malloc_usable_size() a bit more.
|
|
||||||
|
|
||||||
Finally, 3.3 brought aligned allocations. These are made available via a new
|
|
||||||
family of functions around ha_aligned_alloc() that simply map to either
|
|
||||||
posix_memalign(), memalign() or _aligned_malloc() for CYGWIN, depending on
|
|
||||||
which one is available. This latter one requires to pass the pointer to
|
|
||||||
_aligned_free() instead of free(), so for this reason, all aligned allocations
|
|
||||||
have to be released using ha_aligned_free(). Since this mostly happens on
|
|
||||||
configuration elements, in practice it's not as inconvenient as it can sound.
|
|
||||||
These functions are in reality macros handled in "bug.h" like the previous
|
|
||||||
ones in order to deal with DEBUG_MEM_STATS. All "alloc" variants are reported
|
|
||||||
in memstats as "malloc". All "zalloc" variants are reported in memstats as
|
|
||||||
"calloc".
|
|
||||||
|
|
||||||
The currently available allocators are the following:
|
|
||||||
|
|
||||||
- void *ha_aligned_alloc(size_t align, size_t size)
|
|
||||||
- void *ha_aligned_zalloc(size_t align, size_t size)
|
|
||||||
|
|
||||||
Equivalent of malloc() but aligned to <align> bytes. The alignment MUST be
|
|
||||||
at least as large as one word and MUST be a power of two. The "zalloc"
|
|
||||||
variant also zeroes the area on success. Both return NULL on failure.
|
|
||||||
|
|
||||||
- void *ha_aligned_alloc_safe(size_t align, size_t size)
|
|
||||||
- void *ha_aligned_zalloc_safe(size_t align, size_t size)
|
|
||||||
|
|
||||||
Equivalent of malloc() but aligned to <align> bytes. The alignment is
|
|
||||||
automatically adjusted to the nearest larger power of two that is at least
|
|
||||||
as large as a word. The "zalloc" variant also zeroes the area on
|
|
||||||
success. Both return NULL on failure.
|
|
||||||
|
|
||||||
- (type *)ha_aligned_alloc_typed(size_t count, type)
|
|
||||||
(type *)ha_aligned_zalloc_typed(size_t count, type)
|
|
||||||
|
|
||||||
This macro returns an area aligned to the required alignment for type
|
|
||||||
<type>, large enough for <count> objects of this type, and the result is a
|
|
||||||
pointer of this type. The goal is to ease allocation of known structures
|
|
||||||
whose alignment is not necessarily known to the developer (and to avoid
|
|
||||||
encouraging to hard-code alignment). The cast in return also provides a
|
|
||||||
last-minute control in case a wrong type is mistakenly used due to a poor
|
|
||||||
copy-paste or an extra "*" after the type. When DEBUG_MEM_STATS is in use,
|
|
||||||
the type is stored as a string in the ".extra" field so that it can be
|
|
||||||
displayed in "debug dev memstats". The "zalloc" variant also zeroes the
|
|
||||||
area on success. Both return NULL on failure.
|
|
||||||
|
|
||||||
- void ha_aligned_free(void *ptr)
|
|
||||||
|
|
||||||
Frees the area pointed to by ptr. It is the equivalent of free() but for
|
|
||||||
objects allocated using one of the functions above.
|
|
||||||
@ -245,30 +245,6 @@ mt_list_pop(l)
|
|||||||
#=========#
|
#=========#
|
||||||
|
|
||||||
|
|
||||||
mt_list_pop_locked(l)
|
|
||||||
Removes the list's first element, returns it locked. If the list was empty,
|
|
||||||
NULL is returned. A macro MT_LIST_POP_LOCKED() is provided for a
|
|
||||||
more convenient use; instead of returning the list element, it will return
|
|
||||||
the structure holding the element, taking care of preserving the NULL.
|
|
||||||
|
|
||||||
before:
|
|
||||||
+---+ +---+ +---+ +---+ +---+ +---+ +---+
|
|
||||||
#=>| L |<===>| A |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
|
|
||||||
# +---+ +---+ +---+ +---+ +---+ +---+ +---+ #
|
|
||||||
#=====================================================================#
|
|
||||||
|
|
||||||
after:
|
|
||||||
+---+ +---+ +---+ +---+ +---+ +---+
|
|
||||||
#=>| L |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
|
|
||||||
# +---+ +---+ +---+ +---+ +---+ +---+ #
|
|
||||||
#===========================================================#
|
|
||||||
|
|
||||||
+---+
|
|
||||||
# x| A |x #
|
|
||||||
# +---+ #
|
|
||||||
#=========#
|
|
||||||
|
|
||||||
|
|
||||||
_mt_list_lock_next(elt)
|
_mt_list_lock_next(elt)
|
||||||
Locks the link that starts at the next pointer of the designated element.
|
Locks the link that starts at the next pointer of the designated element.
|
||||||
The link is replaced by two locked pointers, and a pointer to the next
|
The link is replaced by two locked pointers, and a pointer to the next
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
2025-08-11 - Pools structure and API
|
2022-02-24 - Pools structure and API
|
||||||
|
|
||||||
1. Background
|
1. Background
|
||||||
-------------
|
-------------
|
||||||
@ -204,14 +204,6 @@ the cache, when this option is set, objects are picked from the cache from the
|
|||||||
oldest one instead of the freshest one. This way even late memory corruptions
|
oldest one instead of the freshest one. This way even late memory corruptions
|
||||||
have a chance to be detected.
|
have a chance to be detected.
|
||||||
|
|
||||||
Another non-destructive approach is to use "-dMbackup". A full copy of the
|
|
||||||
object is made after its end, which eases inspection (e.g. of the parts
|
|
||||||
scratched by the pool_item elements), and a comparison is made upon allocation
|
|
||||||
of that object, just like with "-dMintegrity", causing a crash on mismatch. The
|
|
||||||
initial 4 words corresponding to the list are ignored as well. Note that when
|
|
||||||
both "-dMbackup" and "-dMintegrity" are used, the copy is performed before
|
|
||||||
being scratched, and the comparison is done by "-dMintegrity" only.
|
|
||||||
|
|
||||||
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
|
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
|
||||||
is passed on the executable's command line, pool objects are allocated with
|
is passed on the executable's command line, pool objects are allocated with
|
||||||
one extra pointer compared to the requested size, so that the bytes that follow
|
one extra pointer compared to the requested size, so that the bytes that follow
|
||||||
@ -239,6 +231,10 @@ currently in use:
|
|||||||
+------------+ +------------+ / is set at build time
|
+------------+ +------------+ / is set at build time
|
||||||
or -dMtag at boot time
|
or -dMtag at boot time
|
||||||
|
|
||||||
|
Right now no provisions are made to return objects aligned on larger boundaries
|
||||||
|
than those currently covered by malloc() (i.e. two pointers). This need appears
|
||||||
|
from time to time and the layout above might evolve a little bit if needed.
|
||||||
|
|
||||||
|
|
||||||
4. Storage in the process-wide shared pool
|
4. Storage in the process-wide shared pool
|
||||||
------------------------------------------
|
------------------------------------------
|
||||||
@ -346,25 +342,7 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
|
|||||||
"-dMno-merge" is passed on the executable's command line, the pools
|
"-dMno-merge" is passed on the executable's command line, the pools
|
||||||
also need to have the exact same name to be merged. In addition, unless
|
also need to have the exact same name to be merged. In addition, unless
|
||||||
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
|
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
|
||||||
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
|
up to the size of pointers (16 or 32 bytes). The name that will appear
|
||||||
per-pool basis to enable the UAF detection only for this specific pool,
|
|
||||||
saving the massive overhead of global usage. The name that will appear
|
|
||||||
in the pool upon merging is the name of the first created pool. The
|
|
||||||
returned pointer is the new (or reused) pool head, or NULL upon error.
|
|
||||||
Pools created this way must be destroyed using pool_destroy().
|
|
||||||
|
|
||||||
struct pool_head *create_aligned_pool(char *name, uint size, uint align, uint flags)
|
|
||||||
Create a new pool named <name> for objects of size <size> bytes and
|
|
||||||
aligned to <align> bytes (0 meaning use the platform's default). Pool
|
|
||||||
names are truncated to their first 11 characters. Pools of very similar
|
|
||||||
size will usually be merged if both have set the flag MEM_F_SHARED in
|
|
||||||
<flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
|
|
||||||
"-dMno-merge" is passed on the executable's command line, the pools
|
|
||||||
also need to have the exact same name to be merged. In addition, unless
|
|
||||||
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
|
|
||||||
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
|
|
||||||
per-pool basis to enable the UAF detection only for this specific pool,
|
|
||||||
saving the massive overhead of global usage. The name that will appear
|
|
||||||
in the pool upon merging is the name of the first created pool. The
|
in the pool upon merging is the name of the first created pool. The
|
||||||
returned pointer is the new (or reused) pool head, or NULL upon error.
|
returned pointer is the new (or reused) pool head, or NULL upon error.
|
||||||
Pools created this way must be destroyed using pool_destroy().
|
Pools created this way must be destroyed using pool_destroy().
|
||||||
@ -482,20 +460,6 @@ complicate maintenance.
|
|||||||
|
|
||||||
A few macros exist to ease the declaration of pools:
|
A few macros exist to ease the declaration of pools:
|
||||||
|
|
||||||
DECLARE_ALIGNED_POOL(ptr, name, size, align)
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element, all
|
|
||||||
of which will be aligned to <align> bytes. The alignment will be
|
|
||||||
rounded up to the next power of two and will be at least as large as a
|
|
||||||
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
|
|
||||||
and by assigning the resulting pointer to variable <ptr>. <ptr> will be
|
|
||||||
created of type "struct pool_head *". If the pool needs to be visible
|
|
||||||
outside of the function (which is likely), it will also need to be
|
|
||||||
declared somewhere as "extern struct pool_head *<ptr>;". It is
|
|
||||||
recommended to place such declarations very early in the source file so
|
|
||||||
that the variable is already known to all subsequent functions which
|
|
||||||
may use it.
|
|
||||||
|
|
||||||
DECLARE_POOL(ptr, name, size)
|
DECLARE_POOL(ptr, name, size)
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
Placed at the top level of a file, this declares a global memory pool
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element. This
|
as variable <ptr>, name <name> and size <size> bytes per element. This
|
||||||
@ -507,17 +471,6 @@ DECLARE_POOL(ptr, name, size)
|
|||||||
declarations very early in the source file so that the variable is
|
declarations very early in the source file so that the variable is
|
||||||
already known to all subsequent functions which may use it.
|
already known to all subsequent functions which may use it.
|
||||||
|
|
||||||
DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align)
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element, all
|
|
||||||
of which will be aligned to <align> bytes. The alignment will be
|
|
||||||
rounded up to the next power of two and will be at least as large as a
|
|
||||||
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
|
|
||||||
and by assigning the resulting pointer to local variable <ptr>. <ptr>
|
|
||||||
will be created of type "static struct pool_head *". It is recommended
|
|
||||||
to place such declarations very early in the source file so that the
|
|
||||||
variable is already known to all subsequent functions which may use it.
|
|
||||||
|
|
||||||
DECLARE_STATIC_POOL(ptr, name, size)
|
DECLARE_STATIC_POOL(ptr, name, size)
|
||||||
Placed at the top level of a file, this declares a static memory pool
|
Placed at the top level of a file, this declares a static memory pool
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element. This
|
as variable <ptr>, name <name> and size <size> bytes per element. This
|
||||||
@ -527,42 +480,6 @@ DECLARE_STATIC_POOL(ptr, name, size)
|
|||||||
early in the source file so that the variable is already known to all
|
early in the source file so that the variable is already known to all
|
||||||
subsequent functions which may use it.
|
subsequent functions which may use it.
|
||||||
|
|
||||||
DECLARE_STATIC_TYPED_POOL(ptr, name, type[, extra[, align]])
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name>, and configured to allocate objects of
|
|
||||||
type <type>. It is optionally possible to grow these objects by <extra>
|
|
||||||
bytes (e.g. if they contain some variable length data at the end), and
|
|
||||||
to force them to be aligned to <align> bytes. If only alignment is
|
|
||||||
desired without extra data, pass 0 as <extra>. Alignment must be at
|
|
||||||
least as large as the type's, and a control is enforced at declaration
|
|
||||||
time so that objects cannot be less aligned than what is promised to
|
|
||||||
the compiler. The default alignment of zero indicates that the default
|
|
||||||
one (from the type) should be used. This is made via a call to
|
|
||||||
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to local
|
|
||||||
variable <ptr>. <ptr> will be created of type "static struct pool_head
|
|
||||||
*". It is recommended to place such declarations very early in the
|
|
||||||
source file so that the variable is already known to all subsequent
|
|
||||||
functions which may use it.
|
|
||||||
|
|
||||||
DECLARE_TYPED_POOL(ptr, name, type[, extra[, align]])
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name>, and configured to allocate objects of
|
|
||||||
type <type>. It is optionally possible to grow these objects by <extra>
|
|
||||||
bytes (e.g. if they contain some variable length data at the end), and
|
|
||||||
to force them to be aligned to <align> bytes. If only alignment is
|
|
||||||
desired without extra data, pass 0 as <extra>. Alignment must be at
|
|
||||||
least as large as the type's, and a control is enforced at declaration
|
|
||||||
time so that objects cannot be less aligned than what is promised to
|
|
||||||
the compiler. The default alignment of zero indicates that the default
|
|
||||||
one (from the type) should be used. This is made via a call to
|
|
||||||
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to
|
|
||||||
variable <ptr>. <ptr> will be created of type "struct pool_head *". If
|
|
||||||
the pool needs to be visible outside of the function (which is likely),
|
|
||||||
it will also need to be declared somewhere as "extern struct pool_head
|
|
||||||
*<ptr>;". It is recommended to place such declarations very early in
|
|
||||||
the source file so that the variable is already known to all subsequent
|
|
||||||
functions which may use it.
|
|
||||||
|
|
||||||
|
|
||||||
6. Build options
|
6. Build options
|
||||||
----------------
|
----------------
|
||||||
|
|||||||
@ -1,53 +0,0 @@
|
|||||||
2025/09/16 - SHM stats file storage description and hints
|
|
||||||
|
|
||||||
Shm stats file (used to share thread-groupable statistics over multiple
|
|
||||||
process through the "shm-stats-file" directive) is made of:
|
|
||||||
|
|
||||||
- a main header which describes the file version, the processes making
|
|
||||||
use of it, the common clock source and hints about the number of
|
|
||||||
objects that are currently stored or provisionned in the file.
|
|
||||||
- an indefinite number of "objects" blocks coming right after the
|
|
||||||
main header, all blocks have the same size which is the size of the
|
|
||||||
maximum underlying object that may be stored. The main header tells
|
|
||||||
how many objects are stored in the file.
|
|
||||||
|
|
||||||
File header looks like this (32/64 bits systems):
|
|
||||||
|
|
||||||
0 8 16 32 48 64
|
|
||||||
+-------+---------+----------------+-------------------+-------------------+
|
|
||||||
| VERSION | 2 bytes | global_now_ms (global mono date in ms)|
|
|
||||||
|MAJOR | MINOR | hole | |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| global_now_ns (global mono date in ns) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| now_offset (offset applied to global monotonic date |
|
|
||||||
| on startup) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| Process slot : | 1byte x 64
|
|
||||||
| pid | heartbeat (ticks) |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| objects | objects slots |
|
|
||||||
| (used objects) | (available for use) |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| padding (for future use) | 128 bytes
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
|
|
||||||
Object block looks like this:
|
|
||||||
|
|
||||||
0 8 16 32 48 64
|
|
||||||
+-------+---------+----------------+-------------------+-------------------+
|
|
||||||
| GUID | 128 bytes
|
|
||||||
+ (zero terminated) +
|
|
||||||
| |
|
|
||||||
+-------+---------+--------------------------------------------------------+
|
|
||||||
| tgid | type | padding |
|
|
||||||
+-------+---------+--------------------------------------------------------+
|
|
||||||
| users (bitmask of process slots making use of the obj) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| object data |
|
|
||||||
| (version dependent) |
|
|
||||||
| struct be_counters_shared_tg or |
|
|
||||||
| struct fe_counters_shared_tg |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| padding (to anticipate evolutions) | 64 bytes
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
@ -21,7 +21,7 @@ falls back to CLOCK_REALTIME. The former is more accurate as it really counts
|
|||||||
the time spent in the process, while the latter might also account for time
|
the time spent in the process, while the latter might also account for time
|
||||||
stuck on paging in etc.
|
stuck on paging in etc.
|
||||||
|
|
||||||
Then wdt_ping() is called to arm the timer. It's set to trigger every
|
Then wdt_ping() is called to arm the timer. t's set to trigger every
|
||||||
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
|
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
|
||||||
to reprogram a new wakeup after it has ticked.
|
to reprogram a new wakeup after it has ticked.
|
||||||
|
|
||||||
@ -37,18 +37,15 @@ If the thread was not marked as stuck, it's verified that no progress was made
|
|||||||
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
|
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
|
||||||
progress is measured by the distance between the thread's current cpu_time and
|
progress is measured by the distance between the thread's current cpu_time and
|
||||||
its prev_cpu_time. If the lack of progress is at least as large as the warning
|
its prev_cpu_time. If the lack of progress is at least as large as the warning
|
||||||
threshold, then the signal is bounced to the faulty thread if it's not the
|
threshold and no context switch happened since last call, ha_stuck_warning() is
|
||||||
current one. Since this bounce is based on the time spent without update, it
|
called to emit a warning about that thread. In any case the context switch
|
||||||
already doesn't happen often.
|
counter for that thread is updated.
|
||||||
|
|
||||||
Once on the faulty thread, two checks are performed:
|
If the thread was already marked as stuck, then the thread is considered as
|
||||||
1) if the thread was already marked as stuck, then the thread is considered
|
definitely stuck. Then ha_panic() is directly called if the thread is the
|
||||||
as definitely stuck, and ha_panic() is called. It will not return.
|
current one, otherwise ha_kill() is used to resend the signal directly to the
|
||||||
|
target thread, which will in turn go through this handler and handle the panic
|
||||||
2) a check is made to verify if the scheduler is still ticking, by reading
|
itself.
|
||||||
and setting a variable that only the scheduler can clear when leaving a
|
|
||||||
task. If the scheduler didn't make any progress, ha_stuck_warning() is
|
|
||||||
called to emit a warning about that thread.
|
|
||||||
|
|
||||||
Most of the time there's no panic of course, and a wdt_ping() is performed
|
Most of the time there's no panic of course, and a wdt_ping() is performed
|
||||||
before leaving the handler to reprogram a check for that thread.
|
before leaving the handler to reprogram a check for that thread.
|
||||||
@ -64,12 +61,12 @@ set TAINTED_WARN_BLOCKED_TRAFFIC.
|
|||||||
|
|
||||||
ha_panic() uses the current thread's trash buffer to produce the messages, as
|
ha_panic() uses the current thread's trash buffer to produce the messages, as
|
||||||
we don't care about its contents since that thread will never return. However
|
we don't care about its contents since that thread will never return. However
|
||||||
ha_stuck_warning() instead uses a local 8kB buffer in the thread's stack.
|
ha_stuck_warning() instead uses a local 4kB buffer in the thread's stack.
|
||||||
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
|
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
|
||||||
buffer being filled with each thread's dump messages. ha_stuck_warning() only
|
buffer being filled with each thread's dump messages. ha_stuck_warning() only
|
||||||
calls ha_thread_dump_one(), which works on the current thread. In both cases
|
calls the function for the current thread. In both cases the message is then
|
||||||
the message is then directly sent to fd #2 (stderr) and ha_thread_dump_done()
|
directly sent to fd #2 (stderr) and ha_thread_dump_one() is called to release
|
||||||
is called to release the dumped thread.
|
the dumped thread.
|
||||||
|
|
||||||
Both print a few extra messages, but ha_panic() just ends by looping on abort()
|
Both print a few extra messages, but ha_panic() just ends by looping on abort()
|
||||||
until the process dies.
|
until the process dies.
|
||||||
@ -113,19 +110,13 @@ ha_dump_backtrace() before returning.
|
|||||||
ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
|
ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
|
||||||
then dumps the code bytes nearby the crashing instrution, dumps pointers and
|
then dumps the code bytes nearby the crashing instrution, dumps pointers and
|
||||||
tries to resolve function names, and sends all of that into the target buffer.
|
tries to resolve function names, and sends all of that into the target buffer.
|
||||||
On some architectures (x86_64, arm64), it will also try to detect and decode
|
|
||||||
call instructions and resolve them to called functions.
|
|
||||||
|
|
||||||
3. Improvements
|
3. Improvements
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
The symbols resolution is extremely expensive, particularly for the warnings
|
The symbols resolution is extremely expensive, particularly for the warnings
|
||||||
which should be fast. But we need it, it's just unfortunate that it strikes at
|
which should be fast. But we need it, it's just unfortunate that it strikes at
|
||||||
the wrong moment. At least ha_dump_backtrace() does disable signals while it's
|
the wrong moment.
|
||||||
resolving, in order to avoid unwanted re-entrance. In addition, the called
|
|
||||||
function resolve_sym_name() uses some locking and refrains from calling the
|
|
||||||
dladdr family of functions in a re-entrant way (in the worst case only well
|
|
||||||
known symbols will be resolved)..
|
|
||||||
|
|
||||||
In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
|
In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
|
||||||
which would then later be resolved asynchronously in a tasklet. This can work
|
which would then later be resolved asynchronously in a tasklet. This can work
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
-----------------------
|
-----------------------
|
||||||
HAProxy Starter Guide
|
HAProxy Starter Guide
|
||||||
-----------------------
|
-----------------------
|
||||||
version 3.4
|
version 3.2
|
||||||
|
|
||||||
|
|
||||||
This document is an introduction to HAProxy for all those who don't know it, as
|
This document is an introduction to HAProxy for all those who don't know it, as
|
||||||
|
|||||||
@ -893,9 +893,7 @@ Core class
|
|||||||
|
|
||||||
**context**: init, task, action
|
**context**: init, task, action
|
||||||
|
|
||||||
This function returns a new object of a *httpclient* class. An *httpclient*
|
This function returns a new object of a *httpclient* class.
|
||||||
object must be used to process one and only one request. It must never be
|
|
||||||
reused to process several requests.
|
|
||||||
|
|
||||||
:returns: A :ref:`httpclient_class` object.
|
:returns: A :ref:`httpclient_class` object.
|
||||||
|
|
||||||
@ -935,7 +933,7 @@ Core class
|
|||||||
Give back the hand at the HAProxy scheduler. Unlike :js:func:`core.yield`
|
Give back the hand at the HAProxy scheduler. Unlike :js:func:`core.yield`
|
||||||
the task will not be woken up automatically to resume as fast as possible.
|
the task will not be woken up automatically to resume as fast as possible.
|
||||||
Instead, it will wait for an event to wake the task. If milliseconds argument
|
Instead, it will wait for an event to wake the task. If milliseconds argument
|
||||||
is provided then the Lua execution will be automatically resumed passed this
|
is provided then the Lua excecution will be automatically resumed passed this
|
||||||
delay even if no event caused the task to wake itself up.
|
delay even if no event caused the task to wake itself up.
|
||||||
|
|
||||||
:param integer milliseconds: automatic wakeup passed this delay. (optional)
|
:param integer milliseconds: automatic wakeup passed this delay. (optional)
|
||||||
@ -945,7 +943,7 @@ Core class
|
|||||||
**context**: task, action
|
**context**: task, action
|
||||||
|
|
||||||
Give back the hand at the HAProxy scheduler. It is used when the LUA
|
Give back the hand at the HAProxy scheduler. It is used when the LUA
|
||||||
processing consumes a lot of processing time. Lua execution will be resumed
|
processing consumes a lot of processing time. Lua excecution will be resumed
|
||||||
automatically (automatic reschedule).
|
automatically (automatic reschedule).
|
||||||
|
|
||||||
.. js:function:: core.parse_addr(address)
|
.. js:function:: core.parse_addr(address)
|
||||||
@ -1089,13 +1087,18 @@ Core class
|
|||||||
perform the heavy job in a dedicated task and allow remaining events to be
|
perform the heavy job in a dedicated task and allow remaining events to be
|
||||||
processed more quickly.
|
processed more quickly.
|
||||||
|
|
||||||
.. js:function:: core.use_native_mailers_config()
|
.. js:function:: core.disable_legacy_mailers()
|
||||||
|
|
||||||
**context**: body
|
**LEGACY**
|
||||||
|
|
||||||
Inform haproxy that the script will make use of the native "mailers"
|
**context**: body, init
|
||||||
config section (although legacy). In other words, inform haproxy that
|
|
||||||
:js:func:`Proxy.get_mailers()` will be used later in the program.
|
Disable the sending of email alerts through the legacy email sending
|
||||||
|
function when mailers are used in the configuration.
|
||||||
|
|
||||||
|
Use this when sending email alerts directly from lua.
|
||||||
|
|
||||||
|
:see: :js:func:`Proxy.get_mailers()`
|
||||||
|
|
||||||
.. _proxy_class:
|
.. _proxy_class:
|
||||||
|
|
||||||
@ -1224,14 +1227,8 @@ Proxy class
|
|||||||
|
|
||||||
**LEGACY**
|
**LEGACY**
|
||||||
|
|
||||||
Returns a table containing legacy mailers config (from haproxy configuration
|
Returns a table containing mailers config for the current proxy or nil
|
||||||
file) for the current proxy or nil if mailers are not available for the proxy.
|
if mailers are not available for the proxy.
|
||||||
|
|
||||||
.. warning::
|
|
||||||
When relying on :js:func:`Proxy.get_mailers()` to retrieve mailers
|
|
||||||
configuration, :js:func:`core.use_native_mailers_config()` must be called
|
|
||||||
first from body or init context to inform haproxy that Lua makes use of the
|
|
||||||
legacy mailers config.
|
|
||||||
|
|
||||||
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
|
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
|
||||||
proxy.
|
proxy.
|
||||||
@ -1248,6 +1245,10 @@ ProxyMailers class
|
|||||||
|
|
||||||
This class provides mailers config for a given proxy.
|
This class provides mailers config for a given proxy.
|
||||||
|
|
||||||
|
If sending emails directly from lua, please consider
|
||||||
|
:js:func:`core.disable_legacy_mailers()` to disable the email sending from
|
||||||
|
haproxy. (Or email alerts will be sent twice...)
|
||||||
|
|
||||||
.. js:attribute:: ProxyMailers.track_server_health
|
.. js:attribute:: ProxyMailers.track_server_health
|
||||||
|
|
||||||
Boolean set to true if the option "log-health-checks" is configured on
|
Boolean set to true if the option "log-health-checks" is configured on
|
||||||
@ -2580,9 +2581,7 @@ HTTPClient class
|
|||||||
.. js:class:: HTTPClient
|
.. js:class:: HTTPClient
|
||||||
|
|
||||||
The httpclient class allows issue of outbound HTTP requests through a simple
|
The httpclient class allows issue of outbound HTTP requests through a simple
|
||||||
API without the knowledge of HAProxy internals. Any instance must be used to
|
API without the knowledge of HAProxy internals.
|
||||||
process one and only one request. It must never be reused to process several
|
|
||||||
requests.
|
|
||||||
|
|
||||||
.. js:function:: HTTPClient.get(httpclient, request)
|
.. js:function:: HTTPClient.get(httpclient, request)
|
||||||
.. js:function:: HTTPClient.head(httpclient, request)
|
.. js:function:: HTTPClient.head(httpclient, request)
|
||||||
@ -3917,25 +3916,21 @@ AppletTCP class
|
|||||||
*size* is missing, the function tries to read all the content of the stream
|
*size* is missing, the function tries to read all the content of the stream
|
||||||
until the end. An optional timeout may be specified in milliseconds. In this
|
until the end. An optional timeout may be specified in milliseconds. In this
|
||||||
case the function will return no longer than this delay, with the amount of
|
case the function will return no longer than this delay, with the amount of
|
||||||
available data, or nil if there is no data. An empty string is returned if the
|
available data (possibly none).
|
||||||
connection is closed.
|
|
||||||
|
|
||||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
||||||
:param integer size: the required read size.
|
:param integer size: the required read size.
|
||||||
:returns: return nil if the timeout has expired and no data was available but
|
:returns: always return a string, the string can be empty if the connection is
|
||||||
can still be received. Otherwise, a string is returned, possibly an empty
|
closed.
|
||||||
string if the connection is closed.
|
|
||||||
|
|
||||||
.. js:function:: AppletTCP.try_receive(applet)
|
.. js:function:: AppletTCP.try_receive(applet)
|
||||||
|
|
||||||
Reads available data from the TCP stream and returns immediately. Returns a
|
Reads available data from the TCP stream and returns immediately. Returns a
|
||||||
string containing read bytes or nil if no bytes are available at that time. An
|
string containing read bytes that may possibly be empty if no bytes are
|
||||||
empty string is returned if the connection is closed.
|
available at that time.
|
||||||
|
|
||||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
||||||
:returns: return nil if no data was available but can still be
|
:returns: always return a string, the string can be empty.
|
||||||
received. Otherwise, a string is returned, possibly an empty string if the
|
|
||||||
connection is closed.
|
|
||||||
|
|
||||||
.. js:function:: AppletTCP.send(appletmsg)
|
.. js:function:: AppletTCP.send(appletmsg)
|
||||||
|
|
||||||
@ -4612,27 +4607,6 @@ HTTPMessage class
|
|||||||
data by default.
|
data by default.
|
||||||
:returns: an integer containing the amount of bytes copied or -1.
|
:returns: an integer containing the amount of bytes copied or -1.
|
||||||
|
|
||||||
.. js:function:: HTTPMessage.set_body_len(http_msg, length)
|
|
||||||
|
|
||||||
This function changes the expected payload length of the HTTP message
|
|
||||||
**http_msg**. **length** can be an integer value. In that case, a
|
|
||||||
"Content-Length" header is added with the given value. It is also possible to
|
|
||||||
pass the **"chunked"** string instead of an integer value to force the HTTP
|
|
||||||
message to be chunk-encoded. In that case, a "Transfer-Encoding" header is
|
|
||||||
added with the "chunked" value. In both cases, all existing "Content-Length"
|
|
||||||
and "Transfer-Encoding" headers are removed.
|
|
||||||
|
|
||||||
This function should be used in the filter context to be able to alter the
|
|
||||||
payload of the HTTP message. The internal state of the HTTP message is updated
|
|
||||||
accordingly. :js:func:`HTTPMessage.add_header()` or
|
|
||||||
:js:func:`HTTPMessage.set_header()` functions must be used in that case.
|
|
||||||
|
|
||||||
:param class_httpmessage http_msg: The manipulated HTTP message.
|
|
||||||
:param type length: The new payload length to set. It can be an integer or
|
|
||||||
the string "chunked".
|
|
||||||
:returns: true if the payload length was successfully updated, false
|
|
||||||
otherwise.
|
|
||||||
|
|
||||||
.. js:function:: HTTPMessage.set_eom(http_msg)
|
.. js:function:: HTTPMessage.set_eom(http_msg)
|
||||||
|
|
||||||
This function set the end of message for the HTTP message **http_msg**.
|
This function set the end of message for the HTTP message **http_msg**.
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
------------------------
|
------------------------
|
||||||
HAProxy Management Guide
|
HAProxy Management Guide
|
||||||
------------------------
|
------------------------
|
||||||
version 3.4
|
version 3.2
|
||||||
|
|
||||||
|
|
||||||
This document describes how to start, stop, manage, and troubleshoot HAProxy,
|
This document describes how to start, stop, manage, and troubleshoot HAProxy,
|
||||||
@ -200,12 +200,6 @@ list of options is :
|
|||||||
-c : only performs a check of the configuration files and exits before trying
|
-c : only performs a check of the configuration files and exits before trying
|
||||||
to bind. The exit status is zero if everything is OK, or non-zero if an
|
to bind. The exit status is zero if everything is OK, or non-zero if an
|
||||||
error is encountered. Presence of warnings will be reported if any.
|
error is encountered. Presence of warnings will be reported if any.
|
||||||
By default this option does not report a success message. Combined with
|
|
||||||
"-V" this will print the message "Configuration file is valid" upon
|
|
||||||
success.
|
|
||||||
|
|
||||||
Scripts must use the exit status to determine the success of the
|
|
||||||
command.
|
|
||||||
|
|
||||||
-cc : evaluates a condition as used within a conditional block of the
|
-cc : evaluates a condition as used within a conditional block of the
|
||||||
configuration. The exit status is zero if the condition is true, 1 if the
|
configuration. The exit status is zero if the condition is true, 1 if the
|
||||||
@ -331,16 +325,6 @@ list of options is :
|
|||||||
last released. This works best with "no-merge", "cold-first" and "tag".
|
last released. This works best with "no-merge", "cold-first" and "tag".
|
||||||
Enabling this option will slightly increase the CPU usage.
|
Enabling this option will slightly increase the CPU usage.
|
||||||
|
|
||||||
- backup / no-backup:
|
|
||||||
This option performs a copy of each released object at release time,
|
|
||||||
allowing developers to inspect them. It also performs a comparison at
|
|
||||||
allocation time to detect if anything changed in between, indicating a
|
|
||||||
use-after-free condition. This doubles the memory usage and slightly
|
|
||||||
increases the CPU usage (similar to "integrity"). If combined with
|
|
||||||
"integrity", it still duplicates the contents but doesn't perform the
|
|
||||||
comparison (which is performed by "integrity"). Just like "integrity",
|
|
||||||
it works best with "no-merge", "cold-first" and "tag".
|
|
||||||
|
|
||||||
- no-global / global:
|
- no-global / global:
|
||||||
Depending on the operating system, a process-wide global memory cache
|
Depending on the operating system, a process-wide global memory cache
|
||||||
may be enabled if it is estimated that the standard allocator is too
|
may be enabled if it is estimated that the standard allocator is too
|
||||||
@ -390,10 +374,6 @@ list of options is :
|
|||||||
using strace to see the forwarded data (which do not appear when using
|
using strace to see the forwarded data (which do not appear when using
|
||||||
splice()).
|
splice()).
|
||||||
|
|
||||||
-dT : disable the use of ktls. It is equivalent to the "global" section's
|
|
||||||
keyword "noktls". It is mostly useful when suspecting a bug related to
|
|
||||||
ktls.
|
|
||||||
|
|
||||||
-dV : disable SSL verify on the server side. It is equivalent to having
|
-dV : disable SSL verify on the server side. It is equivalent to having
|
||||||
"ssl-server-verify none" in the "global" section. This is useful when
|
"ssl-server-verify none" in the "global" section. This is useful when
|
||||||
trying to reproduce production issues out of the production
|
trying to reproduce production issues out of the production
|
||||||
@ -900,7 +880,9 @@ If a memory allocation fails due to the memory limit being reached or because
|
|||||||
the system doesn't have any enough memory, then haproxy will first start to
|
the system doesn't have any enough memory, then haproxy will first start to
|
||||||
free all available objects from all pools before attempting to allocate memory
|
free all available objects from all pools before attempting to allocate memory
|
||||||
again. This mechanism of releasing unused memory can be triggered by sending
|
again. This mechanism of releasing unused memory can be triggered by sending
|
||||||
the signal SIGQUIT to the haproxy process.
|
the signal SIGQUIT to the haproxy process. When doing so, the pools state prior
|
||||||
|
to the flush will also be reported to stderr when the process runs in
|
||||||
|
foreground.
|
||||||
|
|
||||||
During a reload operation, the process switched to the graceful stop state also
|
During a reload operation, the process switched to the graceful stop state also
|
||||||
automatically performs some flushes after releasing any connection so that all
|
automatically performs some flushes after releasing any connection so that all
|
||||||
@ -1335,26 +1317,6 @@ Here is the list of static fields using the proxy statistics domain:
|
|||||||
97. used_conn_cur [...S]: current number of connections in use
|
97. used_conn_cur [...S]: current number of connections in use
|
||||||
98. need_conn_est [...S]: estimated needed number of connections
|
98. need_conn_est [...S]: estimated needed number of connections
|
||||||
99. uweight [..BS]: total user weight (backend), server user weight (server)
|
99. uweight [..BS]: total user weight (backend), server user weight (server)
|
||||||
100. agg_server_status [..B.]: backend aggregated gauge of server's status
|
|
||||||
101. agg_server_status_check [..B.]: (deprecated)
|
|
||||||
102. agg_check_status [..B.]: backend aggregated gauge of server's state check
|
|
||||||
status
|
|
||||||
103. srid [...S]: server id revision
|
|
||||||
104. sess_other [.F..]: total number of sessions other than HTTP since process
|
|
||||||
started
|
|
||||||
105. h1_sess [.F..]: total number of HTTP/1 sessions since process started
|
|
||||||
106. h2_sess [.F..]: total number of HTTP/2 sessions since process started
|
|
||||||
107. h3_sess [.F..]: total number of HTTP/3 sessions since process started
|
|
||||||
108. req_other [.F..]: total number of sessions other than HTTP processed by
|
|
||||||
this object since the worker process started
|
|
||||||
109. h1req [.F..]: total number of HTTP/1 sessions processed by this object
|
|
||||||
since the worker process started
|
|
||||||
110. h2req [.F..]: total number of hTTP/2 sessions processed by this object
|
|
||||||
since the worker process started
|
|
||||||
111. h3req [.F..]: total number of HTTP/3 sessions processed by this object
|
|
||||||
since the worker process started
|
|
||||||
112. proto [L...]: protocol
|
|
||||||
113. priv_idle_cur [...S]: current number of private idle connections
|
|
||||||
|
|
||||||
For all other statistics domains, the presence or the order of the fields are
|
For all other statistics domains, the presence or the order of the fields are
|
||||||
not guaranteed. In this case, the header line should always be used to parse
|
not guaranteed. In this case, the header line should always be used to parse
|
||||||
@ -1374,10 +1336,9 @@ The first column designates the object or metric being dumped. Its format is
|
|||||||
specific to the command producing this output and will not be described in this
|
specific to the command producing this output and will not be described in this
|
||||||
section. Usually it will consist in a series of identifiers and field names.
|
section. Usually it will consist in a series of identifiers and field names.
|
||||||
|
|
||||||
The second column contains 4 characters respectively indicating the origin, the
|
The second column contains 3 characters respectively indicating the origin, the
|
||||||
nature, the scope and the persistence state of the value being reported. The
|
nature and the scope of the value being reported. The first character (the
|
||||||
first character (the origin) indicates where the value was extracted from.
|
origin) indicates where the value was extracted from. Possible characters are :
|
||||||
Possible characters are :
|
|
||||||
|
|
||||||
M The value is a metric. It is valid at one instant any may change depending
|
M The value is a metric. It is valid at one instant any may change depending
|
||||||
on its nature .
|
on its nature .
|
||||||
@ -1493,16 +1454,7 @@ characters are currently supported :
|
|||||||
current date or resource usage. At the moment this scope is not used by
|
current date or resource usage. At the moment this scope is not used by
|
||||||
any metric.
|
any metric.
|
||||||
|
|
||||||
The fourth character (persistence state) indicates that the value (the metric)
|
Consumers of these information will generally have enough of these 3 characters
|
||||||
is volatile or persistent across reloads. The following characters are expected :
|
|
||||||
|
|
||||||
V The metric is volatile because it is local to the current process so
|
|
||||||
the value will be lost when reloading.
|
|
||||||
|
|
||||||
P The metric is persistent because it may be shared with other co-processes
|
|
||||||
so that the value is preserved across reloads.
|
|
||||||
|
|
||||||
Consumers of these information will generally have enough of these 4 characters
|
|
||||||
to determine how to accurately report aggregated information across multiple
|
to determine how to accurately report aggregated information across multiple
|
||||||
processes.
|
processes.
|
||||||
|
|
||||||
@ -1689,28 +1641,19 @@ abort ssl crl-file <crlfile>
|
|||||||
|
|
||||||
See also "set ssl crl-file" and "commit ssl crl-file".
|
See also "set ssl crl-file" and "commit ssl crl-file".
|
||||||
|
|
||||||
acme renew <certificate>
|
acme ps
|
||||||
Starts an ACME certificate generation task with the given certificate name.
|
Show the running ACME tasks. See also "acme renew".
|
||||||
The certificate must be linked to an acme section, see section 12.8 "ACME"
|
|
||||||
of the configuration manual. See also "acme status".
|
|
||||||
|
|
||||||
acme status
|
|
||||||
Show the status of every certificates that were configured with ACME.
|
|
||||||
|
|
||||||
This command outputs, separated by a tab:
|
|
||||||
- The name of the certificate configured in haproxy
|
|
||||||
- The acme section used in the configuration
|
|
||||||
- The state of the acme task, either "Running", "Scheduled" or "Stopped"
|
|
||||||
- The UTC expiration date of the certificate in ISO8601 format
|
|
||||||
- The relative expiration time (0d if expired)
|
|
||||||
- The UTC scheduled date of the certificate in ISO8601 format
|
|
||||||
- The relative schedule time (0d if Running)
|
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
$ echo "@1; acme status" | socat /tmp/master.sock - | column -t -s $'\t'
|
$ echo "@1 acme ps" | socat /run/haproxy-master.sock - | column -t -s $'\t'
|
||||||
# certificate section state expiration date (UTC) expires in scheduled date (UTC) scheduled in
|
# certificate section state
|
||||||
ecdsa.pem LE Running 2020-01-18T09:31:12Z 0d 0h00m00s 2020-01-15T21:31:12Z 0d 0h00m00s
|
foobar.pem.rsa LE1 Running
|
||||||
foobar.pem.rsa LE Scheduled 2025-08-04T11:50:54Z 89d 23h01m13s 2025-07-27T23:50:55Z 82d 11h01m14s
|
foobar.pem.ecdsa LE2 Running
|
||||||
|
|
||||||
|
acme renew <certificate>
|
||||||
|
Starts an ACME certificate generation task with the given certificate name.
|
||||||
|
The certificate must be linked to an acme section, see section 3.13. of the
|
||||||
|
configuration manual. See also "acme ps".
|
||||||
|
|
||||||
add acl [@<ver>] <acl> <pattern>
|
add acl [@<ver>] <acl> <pattern>
|
||||||
Add an entry into the acl <acl>. <acl> is the #<id> or the <name> returned by
|
Add an entry into the acl <acl>. <acl> is the #<id> or the <name> returned by
|
||||||
@ -1762,9 +1705,8 @@ add server <backend>/<server> [args]*
|
|||||||
The <server> name must not be already used in the backend. A special
|
The <server> name must not be already used in the backend. A special
|
||||||
restriction is put on the backend which must used a dynamic load-balancing
|
restriction is put on the backend which must used a dynamic load-balancing
|
||||||
algorithm. A subset of keywords from the server config file statement can be
|
algorithm. A subset of keywords from the server config file statement can be
|
||||||
used to configure the server behavior (see "add server help" to list them).
|
used to configure the server behavior. Also note that no settings will be
|
||||||
Also note that no settings will be reused from an hypothetical
|
reused from an hypothetical 'default-server' statement in the same backend.
|
||||||
'default-server' statement in the same backend.
|
|
||||||
|
|
||||||
Currently a dynamic server is statically initialized with the "none"
|
Currently a dynamic server is statically initialized with the "none"
|
||||||
init-addr method. This means that no resolution will be undertaken if a FQDN
|
init-addr method. This means that no resolution will be undertaken if a FQDN
|
||||||
@ -1794,10 +1736,78 @@ add server <backend>/<server> [args]*
|
|||||||
servers. Please refer to the "u-limit" global keyword documentation in this
|
servers. Please refer to the "u-limit" global keyword documentation in this
|
||||||
case.
|
case.
|
||||||
|
|
||||||
add server help
|
Here is the list of the currently supported keywords :
|
||||||
List the keywords supported for dynamic servers by the current haproxy
|
|
||||||
version. Keyword syntax is similar to the server line from the configuration
|
- agent-addr
|
||||||
file, please refer to their individual documentation for details.
|
- agent-check
|
||||||
|
- agent-inter
|
||||||
|
- agent-port
|
||||||
|
- agent-send
|
||||||
|
- allow-0rtt
|
||||||
|
- alpn
|
||||||
|
- addr
|
||||||
|
- backup
|
||||||
|
- ca-file
|
||||||
|
- check
|
||||||
|
- check-alpn
|
||||||
|
- check-proto
|
||||||
|
- check-send-proxy
|
||||||
|
- check-sni
|
||||||
|
- check-ssl
|
||||||
|
- check-via-socks4
|
||||||
|
- ciphers
|
||||||
|
- ciphersuites
|
||||||
|
- cookie
|
||||||
|
- crl-file
|
||||||
|
- crt
|
||||||
|
- disabled
|
||||||
|
- downinter
|
||||||
|
- error-limit
|
||||||
|
- fall
|
||||||
|
- fastinter
|
||||||
|
- force-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
|
||||||
|
- id
|
||||||
|
- init-state
|
||||||
|
- inter
|
||||||
|
- maxconn
|
||||||
|
- maxqueue
|
||||||
|
- minconn
|
||||||
|
- no-ssl-reuse
|
||||||
|
- no-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
|
||||||
|
- no-tls-tickets
|
||||||
|
- npn
|
||||||
|
- observe
|
||||||
|
- on-error
|
||||||
|
- on-marked-down
|
||||||
|
- on-marked-up
|
||||||
|
- pool-low-conn
|
||||||
|
- pool-max-conn
|
||||||
|
- pool-purge-delay
|
||||||
|
- port
|
||||||
|
- proto
|
||||||
|
- proxy-v2-options
|
||||||
|
- rise
|
||||||
|
- send-proxy
|
||||||
|
- send-proxy-v2
|
||||||
|
- send-proxy-v2-ssl
|
||||||
|
- send-proxy-v2-ssl-cn
|
||||||
|
- slowstart
|
||||||
|
- sni
|
||||||
|
- source
|
||||||
|
- ssl
|
||||||
|
- ssl-max-ver
|
||||||
|
- ssl-min-ver
|
||||||
|
- tfo
|
||||||
|
- tls-tickets
|
||||||
|
- track
|
||||||
|
- usesrc
|
||||||
|
- verify
|
||||||
|
- verifyhost
|
||||||
|
- weight
|
||||||
|
- ws
|
||||||
|
|
||||||
|
Their syntax is similar to the server line from the configuration file,
|
||||||
|
please refer to their individual documentation for details.
|
||||||
|
|
||||||
add ssl ca-file <cafile> <payload>
|
add ssl ca-file <cafile> <payload>
|
||||||
Add a new certificate to a ca-file. This command is useful when you reached
|
Add a new certificate to a ca-file. This command is useful when you reached
|
||||||
@ -1834,35 +1844,6 @@ add ssl crt-list <crtlist> <payload>
|
|||||||
$ echo -e 'add ssl crt-list certlist1 <<\nfoobar.pem [allow-0rtt] foo.bar.com
|
$ echo -e 'add ssl crt-list certlist1 <<\nfoobar.pem [allow-0rtt] foo.bar.com
|
||||||
!test1.com\n' | socat /tmp/sock1 -
|
!test1.com\n' | socat /tmp/sock1 -
|
||||||
|
|
||||||
add ssl ech <bind> <payload>
|
|
||||||
Add an ECH key to a <bind> line. The payload must be in the PEM for ECH format.
|
|
||||||
(https://datatracker.ietf.org/doc/html/draft-farrell-tls-pemesni)
|
|
||||||
|
|
||||||
The bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1. This command is only supported on a CLI connection
|
|
||||||
running in experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "show ssl ech" and "ech" in the Section 5.1 of the configuration
|
|
||||||
manual.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
$ openssl ech -public_name foobar.com -out foobar3.com.ech
|
|
||||||
$ echo -e "experimental-mode on; add ssl ech frontend1/@haproxy.conf:19 <<%EOF%\n$(cat foobar3.com.ech)\n%EOF%\n" | \
|
|
||||||
socat /tmp/haproxy.sock -
|
|
||||||
added a new ECH config to frontend1
|
|
||||||
|
|
||||||
add ssl jwt <filename>
|
|
||||||
Add an already loaded certificate to the list of certificates that can be
|
|
||||||
used for JWT validation (see "jwt_verify_cert" converter). This command does
|
|
||||||
not work on ongoing transactions.
|
|
||||||
See also "del ssl jwt" and "show ssl jwt" commands.
|
|
||||||
See "jwt" certificate option for more information.
|
|
||||||
|
|
||||||
clear counters
|
clear counters
|
||||||
Clear the max values of the statistics counters in each proxy (frontend &
|
Clear the max values of the statistics counters in each proxy (frontend &
|
||||||
backend) and in each server. The accumulated counters are not affected. The
|
backend) and in each server. The accumulated counters are not affected. The
|
||||||
@ -2114,11 +2095,10 @@ del ssl ca-file <cafile>
|
|||||||
the "ca-file" or "ca-verify-file" directives in the configuration.
|
the "ca-file" or "ca-verify-file" directives in the configuration.
|
||||||
|
|
||||||
del ssl cert <certfile>
|
del ssl cert <certfile>
|
||||||
Delete a certificate store from HAProxy. The certificate must be unused
|
Delete a certificate store from HAProxy. The certificate must be unused and
|
||||||
(included for JWT validation) and removed from any crt-list or directory.
|
removed from any crt-list or directory. "show ssl cert" displays the status
|
||||||
"show ssl cert" displays the status of the certificate. The deletion doesn't
|
of the certificate. The deletion doesn't work with a certificate referenced
|
||||||
work with a certificate referenced directly with the "crt" directive in the
|
directly with the "crt" directive in the configuration.
|
||||||
configuration.
|
|
||||||
|
|
||||||
del ssl crl-file <crlfile>
|
del ssl crl-file <crlfile>
|
||||||
Delete a CRL file tree entry from HAProxy. The CRL file must be unused and
|
Delete a CRL file tree entry from HAProxy. The CRL file must be unused and
|
||||||
@ -2132,46 +2112,12 @@ del ssl crt-list <filename> <certfile[:line]>
|
|||||||
you will need to provide which line you want to delete. To display the line
|
you will need to provide which line you want to delete. To display the line
|
||||||
numbers, use "show ssl crt-list -n <crtlist>".
|
numbers, use "show ssl crt-list -n <crtlist>".
|
||||||
|
|
||||||
del ssl ech <bind>
|
|
||||||
Delete the ECH keys of a bind line.
|
|
||||||
|
|
||||||
The bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1. This command is only supported on a CLI connection
|
|
||||||
running in experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "show ssl ech", "add ssl ech" and "ech" in the Section 5.1 of the
|
|
||||||
configuration manual.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
$ echo "experimental-mode on; del ssl ech frontend1/@haproxy.conf:19" | socat /tmp/haproxy.sock -
|
|
||||||
deleted all ECH configs from frontend1/@haproxy.conf:19
|
|
||||||
|
|
||||||
del ssl jwt <filename>
|
|
||||||
Remove an already loaded certificate from the list of certificates that can be
|
|
||||||
used for JWT validation (see "jwt_verify_cert" converter). This command does
|
|
||||||
not work on ongoing transactions.
|
|
||||||
See also "add ssl jwt" and "show ssl jwt" commands.
|
|
||||||
See "jwt" certificate option for more information.
|
|
||||||
|
|
||||||
del server <backend>/<server>
|
del server <backend>/<server>
|
||||||
Delete a removable server attached to the backend <backend>. A removable
|
Remove a server attached to the backend <backend>. All servers are eligible,
|
||||||
server is the server which satisfies all of these conditions :
|
except servers which are referenced by other configuration elements. The
|
||||||
- not referenced by other configuration elements
|
server must be put in maintenance mode prior to its deletion. The operation
|
||||||
- must already be in maintenance (see "disable server")
|
is cancelled if the server still has active or idle connection or its
|
||||||
- must not have any active or idle connections
|
connection queue is not empty.
|
||||||
|
|
||||||
If any of these conditions is not met, the command will fail.
|
|
||||||
|
|
||||||
Active connections are those with at least one ongoing request. It is
|
|
||||||
possible to speed up their termination using "shutdown sessions server". It
|
|
||||||
is highly recommended to use "wait srv-removable" before "del server" to
|
|
||||||
ensure that all active or idle connections are closed and that the command
|
|
||||||
succeeds.
|
|
||||||
|
|
||||||
disable agent <backend>/<server>
|
disable agent <backend>/<server>
|
||||||
Mark the auxiliary agent check as temporarily stopped.
|
Mark the auxiliary agent check as temporarily stopped.
|
||||||
@ -2401,7 +2347,7 @@ help [<command>]
|
|||||||
the requested one. The same help screen is also displayed for unknown
|
the requested one. The same help screen is also displayed for unknown
|
||||||
commands.
|
commands.
|
||||||
|
|
||||||
httpclient [--htx] <method> <URI>
|
httpclient <method> <URI>
|
||||||
Launch an HTTP client request and print the response on the CLI. Only
|
Launch an HTTP client request and print the response on the CLI. Only
|
||||||
supported on a CLI connection running in expert mode (see "expert-mode on").
|
supported on a CLI connection running in expert mode (see "expert-mode on").
|
||||||
It's only meant for debugging. The httpclient is able to resolve a server
|
It's only meant for debugging. The httpclient is able to resolve a server
|
||||||
@ -2410,9 +2356,6 @@ httpclient [--htx] <method> <URI>
|
|||||||
able to resolve an host from /etc/hosts if you don't use a local dns daemon
|
able to resolve an host from /etc/hosts if you don't use a local dns daemon
|
||||||
which can resolve those.
|
which can resolve those.
|
||||||
|
|
||||||
The --htx option allows one to use the haproxy internal htx representation using
|
|
||||||
the htx_dump() function, mainly used for debugging.
|
|
||||||
|
|
||||||
new ssl ca-file <cafile>
|
new ssl ca-file <cafile>
|
||||||
Create a new empty CA file tree entry to be filled with a set of CA
|
Create a new empty CA file tree entry to be filled with a set of CA
|
||||||
certificates and added to a crt-list. This command should be used in
|
certificates and added to a crt-list. This command should be used in
|
||||||
@ -2463,7 +2406,7 @@ prompt [help | n | i | p | timed]*
|
|||||||
|
|
||||||
Without any option, this will cycle through prompt mode then non-interactive
|
Without any option, this will cycle through prompt mode then non-interactive
|
||||||
mode. In non-interactive mode, the connection is closed after the last
|
mode. In non-interactive mode, the connection is closed after the last
|
||||||
command of the current line completes. In interactive mode, the connection is
|
command of the current line completes. In interactive mode, the connection is
|
||||||
not closed after a command completes, so that a new one can be entered. In
|
not closed after a command completes, so that a new one can be entered. In
|
||||||
prompt mode, the interactive mode is still in use, and a prompt will appear
|
prompt mode, the interactive mode is still in use, and a prompt will appear
|
||||||
at the beginning of the line, indicating to the user that the interpreter is
|
at the beginning of the line, indicating to the user that the interpreter is
|
||||||
@ -2474,11 +2417,6 @@ prompt [help | n | i | p | timed]*
|
|||||||
advanced scripts, and the non-interactive mode (default) to basic scripts.
|
advanced scripts, and the non-interactive mode (default) to basic scripts.
|
||||||
Note that the non-interactive mode is not available for the master socket.
|
Note that the non-interactive mode is not available for the master socket.
|
||||||
|
|
||||||
publish backend <backend>
|
|
||||||
Activates content switching to a backend instance. This is the reverse
|
|
||||||
operation of "unpublish backend" command. This command is restricted and can
|
|
||||||
only be issued on sockets configured for levels "operator" or "admin".
|
|
||||||
|
|
||||||
quit
|
quit
|
||||||
Close the connection when in interactive mode.
|
Close the connection when in interactive mode.
|
||||||
|
|
||||||
@ -2704,28 +2642,6 @@ set ssl crl-file <crlfile> <payload>
|
|||||||
socat /var/run/haproxy.stat -
|
socat /var/run/haproxy.stat -
|
||||||
echo "commit ssl crl-file crlfile.pem" | socat /var/run/haproxy.stat -
|
echo "commit ssl crl-file crlfile.pem" | socat /var/run/haproxy.stat -
|
||||||
|
|
||||||
set ssl ech <bind> <payload>
|
|
||||||
Replace the ECH keys of a bind line with this one. The payload must be in the
|
|
||||||
PEM for ECH format.
|
|
||||||
(https://datatracker.ietf.org/doc/html/draft-farrell-tls-pemesni)
|
|
||||||
|
|
||||||
The bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1. This command is only supported on a CLI connection
|
|
||||||
running in experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "show ssl ech", "add ssl ech" and "ech" in the Section 5.1 of the
|
|
||||||
configuration manual.
|
|
||||||
|
|
||||||
$ openssl ech -public_name foobar.com -out foobar3.com.ech
|
|
||||||
$ echo -e "experimental-mode on;
|
|
||||||
set ssl ech frontend1/@haproxy.conf:19 <<%EOF%\n$(cat foobar3.com.ech)\n%EOF%\n" | \
|
|
||||||
socat /tmp/haproxy.sock -
|
|
||||||
set new ECH configs for frontend1/@haproxy.conf:19
|
|
||||||
|
|
||||||
set ssl ocsp-response <response | payload>
|
set ssl ocsp-response <response | payload>
|
||||||
This command is used to update an OCSP Response for a certificate (see "crt"
|
This command is used to update an OCSP Response for a certificate (see "crt"
|
||||||
on "bind" lines). Same controls are performed as during the initial loading of
|
on "bind" lines). Same controls are performed as during the initial loading of
|
||||||
@ -2847,13 +2763,6 @@ operator
|
|||||||
increased. It also drops expert and experimental mode. See also "show cli
|
increased. It also drops expert and experimental mode. See also "show cli
|
||||||
level".
|
level".
|
||||||
|
|
||||||
unpublish backend <backend>
|
|
||||||
Marks the backend as unqualified for future traffic selection. In effect,
|
|
||||||
use_backend / default_backend rules which reference it are ignored and the
|
|
||||||
next content switching rules are evaluated. Contrary to disabled backends,
|
|
||||||
servers health checks remain active. This command is restricted and can only
|
|
||||||
be issued on sockets configured for levels "operator" or "admin".
|
|
||||||
|
|
||||||
user
|
user
|
||||||
Decrease the CLI level of the current CLI session to user. It can't be
|
Decrease the CLI level of the current CLI session to user. It can't be
|
||||||
increased. It also drops expert and experimental mode. See also "show cli
|
increased. It also drops expert and experimental mode. See also "show cli
|
||||||
@ -3128,19 +3037,18 @@ show info [typed|json] [desc] [float]
|
|||||||
(...)
|
(...)
|
||||||
|
|
||||||
> show info typed
|
> show info typed
|
||||||
0.Name.1:POSV:str:HAProxy
|
0.Name.1:POS:str:HAProxy
|
||||||
1.Version.1:POSV:str:3.1-dev0-7c653d-2466
|
1.Version.1:POS:str:1.7-dev1-de52ea-146
|
||||||
2.Release_date.1:POSV:str:2025/07/01
|
2.Release_date.1:POS:str:2016/03/11
|
||||||
3.Nbthread.1:CGSV:u32:1
|
3.Nbproc.1:CGS:u32:1
|
||||||
4.Nbproc.1:CGSV:u32:1
|
4.Process_num.1:KGP:u32:1
|
||||||
5.Process_num.1:KGPV:u32:1
|
5.Pid.1:SGP:u32:28105
|
||||||
6.Pid.1:SGPV:u32:638069
|
6.Uptime.1:MDP:str:0d 0h00m08s
|
||||||
7.Uptime.1:MDPV:str:0d 0h00m07s
|
7.Uptime_sec.1:MDP:u32:8
|
||||||
8.Uptime_sec.1:MDPV:u32:7
|
8.Memmax_MB.1:CLP:u32:0
|
||||||
9.Memmax_MB.1:CLPV:u32:0
|
9.PoolAlloc_MB.1:MGP:u32:0
|
||||||
10.PoolAlloc_MB.1:MGPV:u32:0
|
10.PoolUsed_MB.1:MGP:u32:0
|
||||||
11.PoolUsed_MB.1:MGPV:u32:0
|
11.PoolFailed.1:MCP:u32:0
|
||||||
12.PoolFailed.1:MCPV:u32:0
|
|
||||||
(...)
|
(...)
|
||||||
|
|
||||||
In the typed format, the presence of the process ID at the end of the
|
In the typed format, the presence of the process ID at the end of the
|
||||||
@ -3347,17 +3255,16 @@ show quic [<format>] [<filter>]
|
|||||||
|
|
||||||
An optional argument can be specified to control the verbosity. Its value can
|
An optional argument can be specified to control the verbosity. Its value can
|
||||||
be interpreted in different ways. The first possibility is to use predefined
|
be interpreted in different ways. The first possibility is to use predefined
|
||||||
values, "oneline" for the default format, "stream" to list every active
|
values, "oneline" for the default format and "full" to display all
|
||||||
streams and "full" to display all information. Alternatively, a list of
|
information. Alternatively, a list of comma-delimited fields can be specified
|
||||||
comma-delimited fields can be specified to restrict output. Currently
|
to restrict output. Currently supported values are "tp", "sock", "pktns",
|
||||||
supported values are "tp", "sock", "pktns", "cc" and "mux". Finally, "help"
|
"cc" and "mux". Finally, "help" in the format will instead show a more
|
||||||
in the format will instead show a more detailed help message.
|
detailed help message.
|
||||||
|
|
||||||
The final argument is used to restrict or extend the connection list. By
|
The final argument is used to restrict or extend the connection list. By
|
||||||
default, active frontend connections only are displayed. Use the extra
|
default, connections on closing or draining state are not displayed. Use the
|
||||||
argument "clo" to list instead closing frontend connections, "be" for backend
|
extra argument "all" to include them in the output. It's also possible to
|
||||||
connections or "all" for every categories. It's also possible to restrict to
|
restrict to a single connection by specifying its hexadecimal address.
|
||||||
a single connection by specifying its hexadecimal address.
|
|
||||||
|
|
||||||
show servers conn [<backend>]
|
show servers conn [<backend>]
|
||||||
Dump the current and idle connections state of the servers belonging to the
|
Dump the current and idle connections state of the servers belonging to the
|
||||||
@ -3367,32 +3274,7 @@ show servers conn [<backend>]
|
|||||||
The output consists in a header line showing the fields titles, then one
|
The output consists in a header line showing the fields titles, then one
|
||||||
server per line with for each, the backend name and ID, server name and ID,
|
server per line with for each, the backend name and ID, server name and ID,
|
||||||
the address, port and a series of values. The number of fields varies
|
the address, port and a series of values. The number of fields varies
|
||||||
depending on thread count. The exact format of the output may vary slightly
|
depending on thread count.
|
||||||
across versions and depending on the number of threads. One needs to pay
|
|
||||||
attention to the header line to match columns when extracting output values,
|
|
||||||
and to the number of threads as the last columns are per-thread:
|
|
||||||
|
|
||||||
bkname/svname Backend name '/' server name
|
|
||||||
bkid/svid Backend ID '/' server ID
|
|
||||||
addr Server's IP address
|
|
||||||
port Server's port (or zero if none)
|
|
||||||
- Unused field, serves as a visual delimiter
|
|
||||||
purge_delay Interval between connection purges, in milliseconds
|
|
||||||
served Number of connections currently in use
|
|
||||||
used_cur Number of connections currently in use
|
|
||||||
note that this excludes conns attached to a session
|
|
||||||
used_max Highest value of used_cur since the process started
|
|
||||||
need_est Floating estimate of total needed connections
|
|
||||||
idle_sess Number of idle connections flagged as private
|
|
||||||
unsafe_nb Number of idle connections considered as "unsafe"
|
|
||||||
safe_nb Number of idle connections considered as "safe"
|
|
||||||
idle_lim Configured maximum number of idle connections
|
|
||||||
idle_cur Total of the per-thread currently idle connections
|
|
||||||
idle_per_thr[NB] Idle conns per thread for each one of the NB threads
|
|
||||||
|
|
||||||
HAProxy will kill a portion of <idle_cur> every <purge_delay> when the total
|
|
||||||
of <idle_cur> + <used_cur> exceeds the estimate <need_est>. This estimate
|
|
||||||
varies based on connection activity.
|
|
||||||
|
|
||||||
Given the threaded nature of idle connections, it's important to understand
|
Given the threaded nature of idle connections, it's important to understand
|
||||||
that some values may change once read, and that as such, consistency within a
|
that some values may change once read, and that as such, consistency within a
|
||||||
@ -3625,11 +3507,10 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
|
|||||||
|
|
||||||
The rest of the line starting after the first colon follows the "typed output
|
The rest of the line starting after the first colon follows the "typed output
|
||||||
format" described in the section above. In short, the second column (after the
|
format" described in the section above. In short, the second column (after the
|
||||||
first ':') indicates the origin, nature, scope and persistence state of the
|
first ':') indicates the origin, nature and scope of the variable. The third
|
||||||
variable. The third column indicates the field type, among "s32", "s64",
|
column indicates the field type, among "s32", "s64", "u32", "u64", "flt" and
|
||||||
"u32", "u64", "flt' and "str". Then the fourth column is the value itself,
|
"str". Then the fourth column is the value itself, which the consumer knows
|
||||||
which the consumer knows how to parse thanks to column 3 and how to process
|
how to parse thanks to column 3 and how to process thanks to column 2.
|
||||||
thanks to column 2.
|
|
||||||
|
|
||||||
When "desc" is appended to the command, one extra colon followed by a quoted
|
When "desc" is appended to the command, one extra colon followed by a quoted
|
||||||
string is appended with a description for the metric. At the time of writing,
|
string is appended with a description for the metric. At the time of writing,
|
||||||
@ -3642,32 +3523,37 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
|
|||||||
Here's an example of typed output format :
|
Here's an example of typed output format :
|
||||||
|
|
||||||
$ echo "show stat typed" | socat stdio unix-connect:/tmp/sock1
|
$ echo "show stat typed" | socat stdio unix-connect:/tmp/sock1
|
||||||
F.2.0.0.pxname.1:KNSV:str:dummy
|
F.2.0.0.pxname.1:MGP:str:private-frontend
|
||||||
F.2.0.1.svname.1:KNSV:str:FRONTEND
|
F.2.0.1.svname.1:MGP:str:FRONTEND
|
||||||
F.2.0.4.scur.1:MGPV:u32:0
|
F.2.0.8.bin.1:MGP:u64:0
|
||||||
F.2.0.5.smax.1:MMPV:u32:0
|
F.2.0.9.bout.1:MGP:u64:0
|
||||||
F.2.0.6.slim.1:CLPV:u32:524269
|
F.2.0.40.hrsp_2xx.1:MGP:u64:0
|
||||||
F.2.0.7.stot.1:MCPP:u64:0
|
L.2.1.0.pxname.1:MGP:str:private-frontend
|
||||||
F.2.0.8.bin.1:MCPP:u64:0
|
L.2.1.1.svname.1:MGP:str:sock-1
|
||||||
F.2.0.9.bout.1:MCPP:u64:0
|
L.2.1.17.status.1:MGP:str:OPEN
|
||||||
F.2.0.10.dreq.1:MCPP:u64:0
|
L.2.1.73.addr.1:MGP:str:0.0.0.0:8001
|
||||||
F.2.0.11.dresp.1:MCPP:u64:0
|
S.3.13.60.rtime.1:MCP:u32:0
|
||||||
F.2.0.12.ereq.1:MCPP:u64:0
|
S.3.13.61.ttime.1:MCP:u32:0
|
||||||
F.2.0.17.status.1:SGPV:str:OPEN
|
S.3.13.62.agent_status.1:MGP:str:L4TOUT
|
||||||
F.2.0.26.pid.1:KGPV:u32:1
|
S.3.13.64.agent_duration.1:MGP:u64:2001
|
||||||
F.2.0.27.iid.1:KGSV:u32:2
|
S.3.13.65.check_desc.1:MCP:str:Layer4 timeout
|
||||||
F.2.0.28.sid.1:KGSV:u32:0
|
S.3.13.66.agent_desc.1:MCP:str:Layer4 timeout
|
||||||
F.2.0.32.type.1:CGSV:u32:0
|
S.3.13.67.check_rise.1:MCP:u32:2
|
||||||
F.2.0.33.rate.1:MRPP:u32:0
|
S.3.13.68.check_fall.1:MCP:u32:3
|
||||||
F.2.0.34.rate_lim.1:CLPV:u32:0
|
S.3.13.69.check_health.1:SGP:u32:0
|
||||||
F.2.0.35.rate_max.1:MMPV:u32:0
|
S.3.13.70.agent_rise.1:MaP:u32:1
|
||||||
F.2.0.46.req_rate.1:MRPP:u32:0
|
S.3.13.71.agent_fall.1:SGP:u32:1
|
||||||
F.2.0.47.req_rate_max.1:MMPV:u32:0
|
S.3.13.72.agent_health.1:SGP:u32:1
|
||||||
F.2.0.48.req_tot.1:MCPP:u64:0
|
S.3.13.73.addr.1:MCP:str:1.255.255.255:8888
|
||||||
F.2.0.51.comp_in.1:MCPP:u64:0
|
S.3.13.75.mode.1:MAP:str:http
|
||||||
F.2.0.52.comp_out.1:MCPP:u64:0
|
B.3.0.0.pxname.1:MGP:str:private-backend
|
||||||
F.2.0.53.comp_byp.1:MCPP:u64:0
|
B.3.0.1.svname.1:MGP:str:BACKEND
|
||||||
F.2.0.54.comp_rsp.1:MCPP:u64:0
|
B.3.0.2.qcur.1:MGP:u32:0
|
||||||
|
B.3.0.3.qmax.1:MGP:u32:0
|
||||||
|
B.3.0.4.scur.1:MGP:u32:0
|
||||||
|
B.3.0.5.smax.1:MGP:u32:0
|
||||||
|
B.3.0.6.slim.1:MGP:u32:1000
|
||||||
|
B.3.0.55.lastsess.1:MMP:s32:-1
|
||||||
(...)
|
(...)
|
||||||
|
|
||||||
In the typed format, the presence of the process ID at the end of the
|
In the typed format, the presence of the process ID at the end of the
|
||||||
@ -3678,20 +3564,20 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
|
|||||||
$ ( echo show stat typed | socat /var/run/haproxy.sock1 - ; \
|
$ ( echo show stat typed | socat /var/run/haproxy.sock1 - ; \
|
||||||
echo show stat typed | socat /var/run/haproxy.sock2 - ) | \
|
echo show stat typed | socat /var/run/haproxy.sock2 - ) | \
|
||||||
sort -t . -k 1,1 -k 2,2n -k 3,3n -k 4,4n -k 5,5 -k 6,6n
|
sort -t . -k 1,1 -k 2,2n -k 3,3n -k 4,4n -k 5,5 -k 6,6n
|
||||||
B.3.0.0.pxname.1:KNSV:str:private-backend
|
B.3.0.0.pxname.1:MGP:str:private-backend
|
||||||
B.3.0.0.pxname.2:KNSV:str:private-backend
|
B.3.0.0.pxname.2:MGP:str:private-backend
|
||||||
B.3.0.1.svname.1:KNSV:str:BACKEND
|
B.3.0.1.svname.1:MGP:str:BACKEND
|
||||||
B.3.0.1.svname.2:KNSV:str:BACKEND
|
B.3.0.1.svname.2:MGP:str:BACKEND
|
||||||
B.3.0.2.qcur.1:MGPV:u32:0
|
B.3.0.2.qcur.1:MGP:u32:0
|
||||||
B.3.0.2.qcur.2:MGPV:u32:0
|
B.3.0.2.qcur.2:MGP:u32:0
|
||||||
B.3.0.3.qmax.1:MMPV:u32:0
|
B.3.0.3.qmax.1:MGP:u32:0
|
||||||
B.3.0.3.qmax.2:MMPV:u32:0
|
B.3.0.3.qmax.2:MGP:u32:0
|
||||||
B.3.0.4.scur.1:MGPV:u32:0
|
B.3.0.4.scur.1:MGP:u32:0
|
||||||
B.3.0.4.scur.2:MGPV:u32:0
|
B.3.0.4.scur.2:MGP:u32:0
|
||||||
B.3.0.5.smax.1:MMPV:u32:0
|
B.3.0.5.smax.1:MGP:u32:0
|
||||||
B.3.0.5.smax.2:MMPV:u32:0
|
B.3.0.5.smax.2:MGP:u32:0
|
||||||
B.3.0.6.slim.1:CLPV:u32:1000
|
B.3.0.6.slim.1:MGP:u32:1000
|
||||||
B.3.0.6.slim.2:CLPV:u32:1000
|
B.3.0.6.slim.2:MGP:u32:1000
|
||||||
(...)
|
(...)
|
||||||
|
|
||||||
The format of JSON output is described in a schema which may be output
|
The format of JSON output is described in a schema which may be output
|
||||||
@ -3869,66 +3755,6 @@ show ssl crt-list [-n] [<filename>]
|
|||||||
ecdsa.pem:3 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3] localhost !www.test1.com
|
ecdsa.pem:3 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3] localhost !www.test1.com
|
||||||
ecdsa.pem:4 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3]
|
ecdsa.pem:4 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3]
|
||||||
|
|
||||||
show ssl ech [<name>]
|
|
||||||
Display the list of ECH keys loaded in the HAProxy process.
|
|
||||||
|
|
||||||
When <name> is specified, displays the keys for a specific bind line. The
|
|
||||||
bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
The 'age' entry represents the time, in seconds, since the key was loaded in
|
|
||||||
the bind line. This value is reset when HAProxy is started, reloaded, or
|
|
||||||
restarted.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1.
|
|
||||||
This command is only supported on a CLI connection running in experimental
|
|
||||||
mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "ech" in the Section 5.1 of the configuration manual.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
$ echo "experimental-mode on; show ssl ech" | socat /tmp/haproxy.sock -
|
|
||||||
***
|
|
||||||
frontend: frontend1
|
|
||||||
|
|
||||||
bind: frontend1/@haproxy.conf:19
|
|
||||||
|
|
||||||
ECH entry: 0 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
|
|
||||||
|
|
||||||
ECH entry: 1 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
|
|
||||||
|
|
||||||
bind: frontend1/@haproxy.conf:20
|
|
||||||
|
|
||||||
ECH entry: 0 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
|
|
||||||
|
|
||||||
ECH entry: 1 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
|
|
||||||
|
|
||||||
$ echo "experimental-mode on; show ssl ech frontend1/@haproxy.conf:19" | socat /tmp/haproxy.sock -
|
|
||||||
***
|
|
||||||
ECH for frontend1/@haproxy.conf:19
|
|
||||||
ECH entry: 0 public_name: example.com age: 786 (has private key)
|
|
||||||
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
|
|
||||||
|
|
||||||
ECH entry: 1 public_name: example.com age: 786 (has private key)
|
|
||||||
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
|
|
||||||
|
|
||||||
show ssl jwt
|
|
||||||
Display the list of certificates that can be used for JWT validation.
|
|
||||||
See also "add ssl jwt" and "del ssl jwt" commands.
|
|
||||||
See "jwt" certificate option for more information.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
echo "show ssl jwt" | socat /tmp/sock1 -
|
|
||||||
#filename
|
|
||||||
jwt.pem
|
|
||||||
|
|
||||||
show ssl ocsp-response [[text|base64] <id|path>]
|
show ssl ocsp-response [[text|base64] <id|path>]
|
||||||
Display the IDs of the OCSP tree entries corresponding to all the OCSP
|
Display the IDs of the OCSP tree entries corresponding to all the OCSP
|
||||||
responses used in HAProxy, as well as the corresponding frontend
|
responses used in HAProxy, as well as the corresponding frontend
|
||||||
@ -4277,10 +4103,6 @@ shutdown sessions server <backend>/<server>
|
|||||||
maintenance mode, for instance. Such terminated streams are reported with a
|
maintenance mode, for instance. Such terminated streams are reported with a
|
||||||
'K' flag in the logs.
|
'K' flag in the logs.
|
||||||
|
|
||||||
Backend connections are left in idle state, unless the server is already in
|
|
||||||
maintenance mode, in which case they will be immediately scheduled for
|
|
||||||
deletion.
|
|
||||||
|
|
||||||
trace
|
trace
|
||||||
The "trace" command alone lists the trace sources, their current status, and
|
The "trace" command alone lists the trace sources, their current status, and
|
||||||
their brief descriptions. It is only meant as a menu to enter next levels,
|
their brief descriptions. It is only meant as a menu to enter next levels,
|
||||||
@ -4495,13 +4317,12 @@ wait { -h | <delay> } [<condition> [<args>...]]
|
|||||||
unsatisfied for the whole <delay> duration. The supported conditions are:
|
unsatisfied for the whole <delay> duration. The supported conditions are:
|
||||||
|
|
||||||
- srv-removable <proxy>/<server> : this will wait for the specified server to
|
- srv-removable <proxy>/<server> : this will wait for the specified server to
|
||||||
be removable by the "del server" command, i.e. be in maintenance and no
|
be removable, i.e. be in maintenance and no longer have any connection on
|
||||||
longer have any connection on it (neither active or idle). Some conditions
|
it. Some conditions will never be accepted (e.g. not in maintenance) and
|
||||||
will never be accepted (e.g. not in maintenance) and will cause the report
|
will cause the report of a specific error message indicating what condition
|
||||||
of a specific error message indicating what condition is not met. The
|
is not met. The server might even have been removed in parallel and no
|
||||||
server might even have been removed in parallel and no longer exist. If
|
longer exist. If everything is OK before the delay, a success is returned
|
||||||
everything is OK before the delay, a success is returned and the operation
|
and the operation is terminated.
|
||||||
is terminated.
|
|
||||||
|
|
||||||
The default unit for the delay is milliseconds, though other units are
|
The default unit for the delay is milliseconds, though other units are
|
||||||
accepted if suffixed with the usual timer units (us, ms, s, m, h, d). When
|
accepted if suffixed with the usual timer units (us, ms, s, m, h, d). When
|
||||||
@ -4552,11 +4373,6 @@ Example:
|
|||||||
case the full command ends at the end of line or semi-colon like any regular
|
case the full command ends at the end of line or semi-colon like any regular
|
||||||
command.
|
command.
|
||||||
|
|
||||||
Bugs: the sockpair@ protocol used to implement communication between the
|
|
||||||
master and the worker is known to not be reliable on macOS because of an
|
|
||||||
issue in the macOS sendmsg(2) implementation. A command might end up without
|
|
||||||
response because of that.
|
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
|
|
||||||
$ socat /var/run/haproxy-master.sock readline
|
$ socat /var/run/haproxy-master.sock readline
|
||||||
@ -4623,11 +4439,6 @@ Example:
|
|||||||
command). In this case, the prompt mode of the master socket (interactive,
|
command). In this case, the prompt mode of the master socket (interactive,
|
||||||
prompt, timed) is propagated into the worker process.
|
prompt, timed) is propagated into the worker process.
|
||||||
|
|
||||||
Bugs: the sockpair@ protocol used to implement communication between the
|
|
||||||
master and the worker is known to not be reliable on macOS because of an
|
|
||||||
issue in the macOS sendmsg(2) implementation. A command might end up without
|
|
||||||
response because of that.
|
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
# gracefully close connections and delete a server once idle (wait max 10s)
|
# gracefully close connections and delete a server once idle (wait max 10s)
|
||||||
$ socat -t 11 /var/run/haproxy-master.sock - <<< \
|
$ socat -t 11 /var/run/haproxy-master.sock - <<< \
|
||||||
@ -4751,6 +4562,9 @@ show proc [debug]
|
|||||||
1271 worker 1 0d00h00m00s 2.5-dev13
|
1271 worker 1 0d00h00m00s 2.5-dev13
|
||||||
# old workers
|
# old workers
|
||||||
1233 worker 3 0d00h00m43s 2.0-dev3-6019f6-289
|
1233 worker 3 0d00h00m43s 2.0-dev3-6019f6-289
|
||||||
|
# programs
|
||||||
|
1244 foo 0 0d00h00m00s -
|
||||||
|
1255 bar 0 0d00h00m00s -
|
||||||
|
|
||||||
In this example, the master has been reloaded 5 times but one of the old
|
In this example, the master has been reloaded 5 times but one of the old
|
||||||
worker is still running and survived 3 reloads. You could access the CLI of
|
worker is still running and survived 3 reloads. You could access the CLI of
|
||||||
|
|||||||
@ -28,8 +28,7 @@ Revision history
|
|||||||
string encoding. With contributions from Andriy Palamarchuk
|
string encoding. With contributions from Andriy Palamarchuk
|
||||||
(Amazon.com).
|
(Amazon.com).
|
||||||
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
|
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
|
||||||
2025/09/09 - added SSL-related TLVs for key exchange group and signature
|
|
||||||
scheme (Steven Collison)
|
|
||||||
|
|
||||||
1. Background
|
1. Background
|
||||||
|
|
||||||
@ -536,20 +535,18 @@ the information they choose to publish.
|
|||||||
|
|
||||||
The following types have already been registered for the <type> field :
|
The following types have already been registered for the <type> field :
|
||||||
|
|
||||||
#define PP2_TYPE_ALPN 0x01
|
#define PP2_TYPE_ALPN 0x01
|
||||||
#define PP2_TYPE_AUTHORITY 0x02
|
#define PP2_TYPE_AUTHORITY 0x02
|
||||||
#define PP2_TYPE_CRC32C 0x03
|
#define PP2_TYPE_CRC32C 0x03
|
||||||
#define PP2_TYPE_NOOP 0x04
|
#define PP2_TYPE_NOOP 0x04
|
||||||
#define PP2_TYPE_UNIQUE_ID 0x05
|
#define PP2_TYPE_UNIQUE_ID 0x05
|
||||||
#define PP2_TYPE_SSL 0x20
|
#define PP2_TYPE_SSL 0x20
|
||||||
#define PP2_SUBTYPE_SSL_VERSION 0x21
|
#define PP2_SUBTYPE_SSL_VERSION 0x21
|
||||||
#define PP2_SUBTYPE_SSL_CN 0x22
|
#define PP2_SUBTYPE_SSL_CN 0x22
|
||||||
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
||||||
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
||||||
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
||||||
#define PP2_SUBTYPE_SSL_GROUP 0x26
|
#define PP2_TYPE_NETNS 0x30
|
||||||
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
|
|
||||||
#define PP2_TYPE_NETNS 0x30
|
|
||||||
|
|
||||||
|
|
||||||
2.2.1 PP2_TYPE_ALPN
|
2.2.1 PP2_TYPE_ALPN
|
||||||
@ -657,21 +654,13 @@ of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
|
|||||||
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
|
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
|
||||||
of the algorithm used to sign the certificate presented by the frontend when
|
of the algorithm used to sign the certificate presented by the frontend when
|
||||||
the incoming connection was made over an SSL/TLS transport layer, for example
|
the incoming connection was made over an SSL/TLS transport layer, for example
|
||||||
"RSA-SHA256".
|
"SHA256".
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
|
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
|
||||||
of the algorithm used to generate the key of the certificate presented by the
|
of the algorithm used to generate the key of the certificate presented by the
|
||||||
frontend when the incoming connection was made over an SSL/TLS transport layer,
|
frontend when the incoming connection was made over an SSL/TLS transport layer,
|
||||||
for example "RSA2048".
|
for example "RSA2048".
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_GROUP provides the US-ASCII string name of
|
|
||||||
the key exchange algorithm used for the frontend TLS connection, for example
|
|
||||||
"secp256r1".
|
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
|
|
||||||
name of the algorithm the frontend used to sign the ServerKeyExchange or
|
|
||||||
CertificateVerify message, for example "rsa_pss_rsae_sha256".
|
|
||||||
|
|
||||||
In all cases, the string representation (in UTF8) of the Common Name field
|
In all cases, the string representation (in UTF8) of the Common Name field
|
||||||
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
|
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
|
||||||
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
|
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
|
||||||
|
|||||||
@ -3,7 +3,7 @@
|
|||||||
-- Provides a pure lua alternative to tcpcheck mailers.
|
-- Provides a pure lua alternative to tcpcheck mailers.
|
||||||
--
|
--
|
||||||
-- To be loaded using "lua-load" from haproxy configuration to handle
|
-- To be loaded using "lua-load" from haproxy configuration to handle
|
||||||
-- email-alerts directly from lua
|
-- email-alerts directly from lua and disable legacy tcpcheck implementation.
|
||||||
|
|
||||||
local SYSLOG_LEVEL = {
|
local SYSLOG_LEVEL = {
|
||||||
["EMERG"] = 0,
|
["EMERG"] = 0,
|
||||||
@ -364,9 +364,9 @@ local function srv_event_add(event, data)
|
|||||||
mailers_track_server_events(data.reference)
|
mailers_track_server_events(data.reference)
|
||||||
end
|
end
|
||||||
|
|
||||||
-- tell haproxy that we do use the legacy native "mailers" config section
|
|
||||||
-- which allows us to retrieve mailers configuration using Proxy:get_mailers()
|
-- disable legacy email-alerts since email-alerts will be sent from lua directly
|
||||||
core.use_native_mailers_config()
|
core.disable_legacy_mailers()
|
||||||
|
|
||||||
-- event subscriptions are purposely performed in an init function to prevent
|
-- event subscriptions are purposely performed in an init function to prevent
|
||||||
-- email alerts from being generated too early (when process is starting up)
|
-- email alerts from being generated too early (when process is starting up)
|
||||||
|
|||||||
@ -112,7 +112,7 @@ local function rotate_piece(piece, piece_id, px, py, board)
|
|||||||
end
|
end
|
||||||
|
|
||||||
function render(applet, board, piece, piece_id, px, py, score)
|
function render(applet, board, piece, piece_id, px, py, score)
|
||||||
local output = cursor_home
|
local output = clear_screen .. cursor_home
|
||||||
output = output .. game_name .. " - Lines: " .. score .. "\r\n"
|
output = output .. game_name .. " - Lines: " .. score .. "\r\n"
|
||||||
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
|
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
|
||||||
for y = 1, board_height do
|
for y = 1, board_height do
|
||||||
@ -160,7 +160,6 @@ function handler(applet)
|
|||||||
end
|
end
|
||||||
|
|
||||||
applet:send(cursor_hide)
|
applet:send(cursor_hide)
|
||||||
applet:send(clear_screen)
|
|
||||||
|
|
||||||
-- fall the piece by one line every delay
|
-- fall the piece by one line every delay
|
||||||
local function fall_piece()
|
local function fall_piece()
|
||||||
@ -215,7 +214,7 @@ function handler(applet)
|
|||||||
|
|
||||||
local input = applet:receive(1, delay)
|
local input = applet:receive(1, delay)
|
||||||
if input then
|
if input then
|
||||||
if input == "" or input == "q" then
|
if input == "q" then
|
||||||
game_over = true
|
game_over = true
|
||||||
elseif input == "\27" then
|
elseif input == "\27" then
|
||||||
local a = applet:receive(1, delay)
|
local a = applet:receive(1, delay)
|
||||||
|
|||||||
@ -5,14 +5,13 @@
|
|||||||
#include <haproxy/istbuf.h>
|
#include <haproxy/istbuf.h>
|
||||||
#include <haproxy/openssl-compat.h>
|
#include <haproxy/openssl-compat.h>
|
||||||
|
|
||||||
#define ACME_RETRY 5
|
#define ACME_RETRY 3
|
||||||
|
|
||||||
/* acme section configuration */
|
/* acme section configuration */
|
||||||
struct acme_cfg {
|
struct acme_cfg {
|
||||||
char *filename; /* config filename */
|
char *filename; /* config filename */
|
||||||
int linenum; /* config linenum */
|
int linenum; /* config linenum */
|
||||||
char *name; /* section name */
|
char *name; /* section name */
|
||||||
int reuse_key; /* do we need to renew the private key */
|
|
||||||
char *directory; /* directory URL */
|
char *directory; /* directory URL */
|
||||||
char *map; /* storage for tokens + thumbprint */
|
char *map; /* storage for tokens + thumbprint */
|
||||||
struct {
|
struct {
|
||||||
@ -28,13 +27,11 @@ struct acme_cfg {
|
|||||||
int curves; /* NID of curves */
|
int curves; /* NID of curves */
|
||||||
} key;
|
} key;
|
||||||
char *challenge; /* HTTP-01, DNS-01, etc */
|
char *challenge; /* HTTP-01, DNS-01, etc */
|
||||||
char *vars; /* variables put in the dpapi sink */
|
|
||||||
char *provider; /* DNS provider put in the dpapi sink */
|
|
||||||
struct acme_cfg *next;
|
struct acme_cfg *next;
|
||||||
};
|
};
|
||||||
|
|
||||||
enum acme_st {
|
enum acme_st {
|
||||||
ACME_RESOURCES = 0,
|
ACME_RESSOURCES = 0,
|
||||||
ACME_NEWNONCE,
|
ACME_NEWNONCE,
|
||||||
ACME_CHKACCOUNT,
|
ACME_CHKACCOUNT,
|
||||||
ACME_NEWACCOUNT,
|
ACME_NEWACCOUNT,
|
||||||
@ -54,11 +51,9 @@ enum http_st {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct acme_auth {
|
struct acme_auth {
|
||||||
struct ist dns; /* dns entry */
|
|
||||||
struct ist auth; /* auth URI */
|
struct ist auth; /* auth URI */
|
||||||
struct ist chall; /* challenge URI */
|
struct ist chall; /* challenge URI */
|
||||||
struct ist token; /* token */
|
struct ist token; /* token */
|
||||||
int ready; /* is the challenge ready ? */
|
|
||||||
void *next;
|
void *next;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -75,7 +70,7 @@ struct acme_ctx {
|
|||||||
struct ist newNonce;
|
struct ist newNonce;
|
||||||
struct ist newAccount;
|
struct ist newAccount;
|
||||||
struct ist newOrder;
|
struct ist newOrder;
|
||||||
} resources;
|
} ressources;
|
||||||
struct ist nonce;
|
struct ist nonce;
|
||||||
struct ist kid;
|
struct ist kid;
|
||||||
struct ist order;
|
struct ist order;
|
||||||
@ -84,21 +79,6 @@ struct acme_ctx {
|
|||||||
X509_REQ *req;
|
X509_REQ *req;
|
||||||
struct ist finalize;
|
struct ist finalize;
|
||||||
struct ist certificate;
|
struct ist certificate;
|
||||||
struct task *task;
|
struct mt_list el;
|
||||||
struct ebmb_node node;
|
|
||||||
char name[VAR_ARRAY];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
|
|
||||||
#define ACME_EV_NEW (1ULL << 1) /* new task */
|
|
||||||
#define ACME_EV_TASK (1ULL << 2) /* Task handler */
|
|
||||||
#define ACME_EV_REQ (1ULL << 3) /* HTTP Request */
|
|
||||||
#define ACME_EV_RES (1ULL << 4) /* HTTP Response */
|
|
||||||
|
|
||||||
#define ACME_VERB_CLEAN 1
|
|
||||||
#define ACME_VERB_MINIMAL 2
|
|
||||||
#define ACME_VERB_SIMPLE 3
|
|
||||||
#define ACME_VERB_ADVANCED 4
|
|
||||||
#define ACME_VERB_COMPLETE 5
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|||||||
@ -4,9 +4,6 @@
|
|||||||
|
|
||||||
#include <haproxy/ssl_ckch-t.h>
|
#include <haproxy/ssl_ckch-t.h>
|
||||||
|
|
||||||
int ckch_conf_acme_init(void *value, char *buf, struct ckch_store *s, int cli, const char *filename, int linenum, char **err);
|
int ckch_conf_acme_init(void *value, char *buf, struct ckch_data *d, int cli, const char *filename, int linenum, char **err);
|
||||||
EVP_PKEY *acme_gen_tmp_pkey();
|
|
||||||
X509 *acme_gen_tmp_x509();
|
|
||||||
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|||||||
@ -66,8 +66,7 @@ enum act_parse_ret {
|
|||||||
enum act_opt {
|
enum act_opt {
|
||||||
ACT_OPT_NONE = 0x00000000, /* no flag */
|
ACT_OPT_NONE = 0x00000000, /* no flag */
|
||||||
ACT_OPT_FINAL = 0x00000001, /* last call, cannot yield */
|
ACT_OPT_FINAL = 0x00000001, /* last call, cannot yield */
|
||||||
ACT_OPT_FINAL_EARLY = 0x00000002, /* set in addition to ACT_OPT_FINAL if last call occurs earlier than normal due to unexpected IO/error */
|
ACT_OPT_FIRST = 0x00000002, /* first call for this action */
|
||||||
ACT_OPT_FIRST = 0x00000004, /* first call for this action */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Flags used to describe the action. */
|
/* Flags used to describe the action. */
|
||||||
|
|||||||
@ -76,12 +76,12 @@ struct memprof_stats {
|
|||||||
const void *caller;
|
const void *caller;
|
||||||
enum memprof_method method;
|
enum memprof_method method;
|
||||||
/* 4-7 bytes hole here */
|
/* 4-7 bytes hole here */
|
||||||
unsigned long long locked_calls;
|
|
||||||
unsigned long long alloc_calls;
|
unsigned long long alloc_calls;
|
||||||
unsigned long long free_calls;
|
unsigned long long free_calls;
|
||||||
unsigned long long alloc_tot;
|
unsigned long long alloc_tot;
|
||||||
unsigned long long free_tot;
|
unsigned long long free_tot;
|
||||||
void *info; // for pools, ptr to the pool
|
void *info; // for pools, ptr to the pool
|
||||||
|
void *pad; // pad to 64
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -125,8 +125,8 @@ struct activity {
|
|||||||
unsigned int ctr2; // general purposee debug counter
|
unsigned int ctr2; // general purposee debug counter
|
||||||
#endif
|
#endif
|
||||||
char __pad[0]; // unused except to check remaining room
|
char __pad[0]; // unused except to check remaining room
|
||||||
char __end[0] THREAD_ALIGNED();
|
char __end[0] __attribute__((aligned(64))); // align size to 64.
|
||||||
} THREAD_ALIGNED();
|
};
|
||||||
|
|
||||||
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
|
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
|
||||||
#define SCHED_ACT_HASH_BITS 8
|
#define SCHED_ACT_HASH_BITS 8
|
||||||
@ -143,10 +143,7 @@ struct sched_activity {
|
|||||||
uint64_t calls;
|
uint64_t calls;
|
||||||
uint64_t cpu_time;
|
uint64_t cpu_time;
|
||||||
uint64_t lat_time;
|
uint64_t lat_time;
|
||||||
uint64_t lkw_time; /* lock waiting time */
|
};
|
||||||
uint64_t lkd_time; /* locked time */
|
|
||||||
uint64_t mem_time; /* memory ops wait time */
|
|
||||||
} THREAD_ALIGNED();
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_ACTIVITY_T_H */
|
#endif /* _HAPROXY_ACTIVITY_T_H */
|
||||||
|
|
||||||
|
|||||||
@ -47,7 +47,7 @@
|
|||||||
#define APPCTX_FL_ERROR 0x00000080
|
#define APPCTX_FL_ERROR 0x00000080
|
||||||
#define APPCTX_FL_SHUTDOWN 0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
|
#define APPCTX_FL_SHUTDOWN 0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
|
||||||
#define APPCTX_FL_WANT_DIE 0x00000200 /* applet was running and requested to die */
|
#define APPCTX_FL_WANT_DIE 0x00000200 /* applet was running and requested to die */
|
||||||
/* unused: 0x00000400 */
|
#define APPCTX_FL_INOUT_BUFS 0x00000400 /* applet uses its own buffers */
|
||||||
#define APPCTX_FL_FASTFWD 0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
|
#define APPCTX_FL_FASTFWD 0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
|
||||||
#define APPCTX_FL_IN_MAYALLOC 0x00001000 /* applet may try again to allocate its inbuf */
|
#define APPCTX_FL_IN_MAYALLOC 0x00001000 /* applet may try again to allocate its inbuf */
|
||||||
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */
|
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */
|
||||||
@ -73,22 +73,17 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
|
|||||||
_(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
|
_(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
|
||||||
_(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
|
_(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
|
||||||
_(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
|
_(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
|
||||||
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE,
|
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE, _(APPCTX_FL_INOUT_BUFS,
|
||||||
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC)))))))))))));
|
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC))))))))))))));
|
||||||
/* epilogue */
|
/* epilogue */
|
||||||
_(~0U);
|
_(~0U);
|
||||||
return buf;
|
return buf;
|
||||||
#undef _
|
#undef _
|
||||||
}
|
}
|
||||||
|
|
||||||
#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
|
|
||||||
#define APPLET_FL_WARNED 0x00000002 /* Set when warning was already emitted about a legacy applet */
|
|
||||||
#define APPLET_FL_HTX 0x00000004 /* Set if the applet is using HTX buffers */
|
|
||||||
|
|
||||||
/* Applet descriptor */
|
/* Applet descriptor */
|
||||||
struct applet {
|
struct applet {
|
||||||
enum obj_type obj_type; /* object type = OBJ_TYPE_APPLET */
|
enum obj_type obj_type; /* object type = OBJ_TYPE_APPLET */
|
||||||
unsigned int flags; /* APPLET_FL_* flags */
|
|
||||||
/* 3 unused bytes here */
|
/* 3 unused bytes here */
|
||||||
char *name; /* applet's name to report in logs */
|
char *name; /* applet's name to report in logs */
|
||||||
int (*init)(struct appctx *); /* callback to init resources, may be NULL.
|
int (*init)(struct appctx *); /* callback to init resources, may be NULL.
|
||||||
|
|||||||
@ -62,12 +62,6 @@ ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs,
|
|||||||
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
|
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
|
||||||
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
|
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
|
||||||
|
|
||||||
|
|
||||||
static forceinline uint appctx_app_test(const struct appctx *appctx, uint test)
|
|
||||||
{
|
|
||||||
return (appctx->applet->flags & test);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
|
static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
|
||||||
{
|
{
|
||||||
return appctx_new_on(applet, sedesc, tid);
|
return appctx_new_on(applet, sedesc, tid);
|
||||||
@ -122,7 +116,7 @@ static inline int appctx_init(struct appctx *appctx)
|
|||||||
* the appctx will be fully initialized. The session and the stream will
|
* the appctx will be fully initialized. The session and the stream will
|
||||||
* eventually be created. The affinity must be set now !
|
* eventually be created. The affinity must be set now !
|
||||||
*/
|
*/
|
||||||
BUG_ON(appctx->t->tid != -1 && appctx->t->tid != tid);
|
BUG_ON(appctx->t->tid != tid);
|
||||||
task_set_thread(appctx->t, tid);
|
task_set_thread(appctx->t, tid);
|
||||||
|
|
||||||
if (appctx->applet->init)
|
if (appctx->applet->init)
|
||||||
@ -288,156 +282,13 @@ static inline void applet_expect_data(struct appctx *appctx)
|
|||||||
se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA);
|
se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns the buffer containing data pushed to the applet by the stream. For
|
|
||||||
* applets using its own buffers it is the appctx input buffer. For legacy
|
|
||||||
* applet, it is the output channel buffer.
|
|
||||||
*/
|
|
||||||
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) || !appctx_get_buf(appctx, &appctx->inbuf))
|
|
||||||
return NULL;
|
|
||||||
return &appctx->inbuf;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
return sc_ob(appctx_sc(appctx));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the buffer containing data pushed by the applets to the stream. For
|
|
||||||
* applets using its own buffer it is the appctx output buffer. For legacy
|
|
||||||
* applet, it is the input channel buffer.
|
|
||||||
*/
|
|
||||||
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC|APPCTX_FL_OUTBLK_FULL) ||
|
|
||||||
!appctx_get_buf(appctx, &appctx->outbuf))
|
|
||||||
return NULL;
|
|
||||||
return &appctx->outbuf;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
return sc_ib(appctx_sc(appctx));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
|
|
||||||
static inline size_t applet_htx_input_data(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return htx_used_space(htxbuf(&appctx->inbuf));
|
|
||||||
else
|
|
||||||
return co_data(sc_oc(appctx_sc(appctx)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
|
|
||||||
static inline size_t applet_input_data(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_HTX))
|
|
||||||
return applet_htx_input_data(appctx);
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return b_data(&appctx->inbuf);
|
|
||||||
else
|
|
||||||
return co_data(sc_oc(appctx_sc(appctx)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of HTX data in the output buffer (see applet_get_outbuf) */
|
|
||||||
static inline size_t applet_htx_output_data(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return htx_used_space(htxbuf(&appctx->outbuf));
|
|
||||||
else
|
|
||||||
return ci_data(sc_ic(appctx_sc(appctx)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of data in the output buffer (see applet_get_outbuf) */
|
|
||||||
static inline size_t applet_output_data(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_HTX))
|
|
||||||
return applet_htx_output_data(appctx);
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return b_data(&appctx->outbuf);
|
|
||||||
else
|
|
||||||
return ci_data(sc_ic(appctx_sc(appctx)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
|
|
||||||
*
|
|
||||||
* This is useful when data have been read directly from the buffer. It is
|
|
||||||
* illegal to call this function with <len> causing a wrapping at the end of the
|
|
||||||
* buffer. It's the caller's responsibility to ensure that <len> is never larger
|
|
||||||
* than available output data.
|
|
||||||
*
|
|
||||||
* This function is not HTX aware.
|
|
||||||
*/
|
|
||||||
static inline void applet_skip_input(struct appctx *appctx, size_t len)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
b_del(&appctx->inbuf, len);
|
|
||||||
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
co_skip(sc_oc(appctx_sc(appctx)), len);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Removes all bytes from the input buffer (see applet_get_inbuf).
|
|
||||||
*/
|
|
||||||
static inline void applet_reset_input(struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
b_reset(&appctx->inbuf);
|
|
||||||
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of space available at the HTX output buffer (see applet_get_outbuf).
|
|
||||||
*/
|
|
||||||
static inline size_t applet_htx_output_room(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return htx_free_data_space(htxbuf(&appctx->outbuf));
|
|
||||||
else
|
|
||||||
return channel_recv_max(sc_ic(appctx_sc(appctx)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of space available at the output buffer (see applet_get_outbuf).
|
|
||||||
*/
|
|
||||||
static inline size_t applet_output_room(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_HTX))
|
|
||||||
return applet_htx_output_room(appctx);
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return b_room(&appctx->outbuf);
|
|
||||||
else
|
|
||||||
return channel_recv_max(sc_ic(appctx_sc(appctx)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/*Indicates that the applet have more data to deliver and it needs more room in
|
|
||||||
* the output buffer to do so (see applet_get_outbuf).
|
|
||||||
*
|
|
||||||
* For applets using its own buffers, <room_needed> is not used and only
|
|
||||||
* <appctx> flags are updated. For legacy applets, the amount of free space
|
|
||||||
* required must be specified. In this last case, it is the caller
|
|
||||||
* responsibility to be sure <room_needed> is valid.
|
|
||||||
*/
|
|
||||||
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
applet_have_more_data(appctx);
|
|
||||||
else
|
|
||||||
sc_need_room(appctx_sc(appctx), room_needed);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Should only be used via wrappers applet_putchk() / applet_putchk_stress(). */
|
/* Should only be used via wrappers applet_putchk() / applet_putchk_stress(). */
|
||||||
static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
|
static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
|
||||||
int stress)
|
int stress)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (unlikely(stress) ?
|
if (unlikely(stress) ?
|
||||||
b_data(&appctx->outbuf) :
|
b_data(&appctx->outbuf) :
|
||||||
b_data(chunk) > b_room(&appctx->outbuf)) {
|
b_data(chunk) > b_room(&appctx->outbuf)) {
|
||||||
@ -467,10 +318,9 @@ static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* writes chunk <chunk> into the applet output buffer (see applet_get_outbuf).
|
/* writes chunk <chunk> into the input channel of the stream attached to this
|
||||||
*
|
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
|
||||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
* See ci_putchk() for the list of return codes.
|
||||||
* shutdown, invalid call...)
|
|
||||||
*/
|
*/
|
||||||
static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk)
|
static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk)
|
||||||
{
|
{
|
||||||
@ -483,16 +333,15 @@ static inline int applet_putchk_stress(struct appctx *appctx, struct buffer *chu
|
|||||||
return _applet_putchk(appctx, chunk, 1);
|
return _applet_putchk(appctx, chunk, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* writes <len> chars from <blk> into the applet output buffer (see applet_get_outbuf).
|
/* writes <len> chars from <blk> into the input channel of the stream attached
|
||||||
*
|
* to this appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full
|
||||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
* error. See ci_putblk() for the list of return codes.
|
||||||
* shutdown, invalid call...)
|
|
||||||
*/
|
*/
|
||||||
static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
|
static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (len > b_room(&appctx->outbuf)) {
|
if (len > b_room(&appctx->outbuf)) {
|
||||||
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
||||||
ret = -1;
|
ret = -1;
|
||||||
@ -518,17 +367,16 @@ static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* writes chars from <str> up to the trailing zero (excluded) into the applet
|
/* writes chars from <str> up to the trailing zero (excluded) into the input
|
||||||
* output buffer (see applet_get_outbuf).
|
* channel of the stream attached to this appctx's endpoint, and marks the
|
||||||
*
|
* SC_FL_NEED_ROOM on a channel full error. See ci_putstr() for the list of
|
||||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
* return codes.
|
||||||
* shutdown, invalid call...)
|
|
||||||
*/
|
*/
|
||||||
static inline int applet_putstr(struct appctx *appctx, const char *str)
|
static inline int applet_putstr(struct appctx *appctx, const char *str)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
int len = strlen(str);
|
int len = strlen(str);
|
||||||
|
|
||||||
if (len > b_room(&appctx->outbuf)) {
|
if (len > b_room(&appctx->outbuf)) {
|
||||||
@ -555,16 +403,15 @@ static inline int applet_putstr(struct appctx *appctx, const char *str)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* writes character <chr> into the applet's output buffer (see applet_get_outbuf).
|
/* writes character <chr> into the input channel of the stream attached to this
|
||||||
*
|
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
|
||||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
* See ci_putchr() for the list of return codes.
|
||||||
* shutdown, invalid call...)
|
|
||||||
*/
|
*/
|
||||||
static inline int applet_putchr(struct appctx *appctx, char chr)
|
static inline int applet_putchr(struct appctx *appctx, char chr)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (b_full(&appctx->outbuf)) {
|
if (b_full(&appctx->outbuf)) {
|
||||||
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
||||||
ret = -1;
|
ret = -1;
|
||||||
@ -591,283 +438,6 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int applet_may_get(const struct appctx *appctx, size_t len)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
if (len > b_data(&appctx->inbuf)) {
|
|
||||||
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
|
|
||||||
return -1;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
const struct stconn *sc = appctx_sc(appctx);
|
|
||||||
|
|
||||||
if ((sc->flags & SC_FL_SHUT_DONE) || len > co_data(sc_oc(sc))) {
|
|
||||||
if (sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
|
|
||||||
return -1;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
/* Gets one char from the applet input buffer (see appet_get_inbuf),
|
|
||||||
*
|
|
||||||
* Return values :
|
|
||||||
* 1 : number of bytes read, equal to requested size.
|
|
||||||
* =0 : not enough data available. <c> is left undefined.
|
|
||||||
* <0 : no more bytes readable because output is shut.
|
|
||||||
*
|
|
||||||
* The status of the corresponding buffer is not changed. The caller must call
|
|
||||||
* applet_skip_input() to update it.
|
|
||||||
*/
|
|
||||||
static inline int applet_getchar(const struct appctx *appctx, char *c)
|
|
||||||
{
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = applet_may_get(appctx, 1);
|
|
||||||
if (ret <= 0)
|
|
||||||
return ret;
|
|
||||||
*c = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
? *(b_head(&appctx->inbuf))
|
|
||||||
: *(co_head(sc_oc(appctx_sc(appctx)))));
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Copies one full block of data from the applet input buffer (see
|
|
||||||
* appet_get_inbuf).
|
|
||||||
*
|
|
||||||
* <len> bytes are capied, starting at the offset <offset>.
|
|
||||||
*
|
|
||||||
* Return values :
|
|
||||||
* >0 : number of bytes read, equal to requested size.
|
|
||||||
* =0 : not enough data available. <blk> is left undefined.
|
|
||||||
* <0 : no more bytes readable because output is shut.
|
|
||||||
*
|
|
||||||
* The status of the corresponding buffer is not changed. The caller must call
|
|
||||||
* applet_skip_input() to update it.
|
|
||||||
*/
|
|
||||||
static inline int applet_getblk(const struct appctx *appctx, char *blk, int len, int offset)
|
|
||||||
{
|
|
||||||
const struct buffer *buf;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = applet_may_get(appctx, len+offset);
|
|
||||||
if (ret <= 0)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
buf = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
? &appctx->inbuf
|
|
||||||
: sc_ob(appctx_sc(appctx)));
|
|
||||||
return b_getblk(buf, blk, len, offset);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Gets one text block representing a word from the applet input buffer (see
|
|
||||||
* appet_get_inbuf).
|
|
||||||
*
|
|
||||||
* The separator is waited for as long as some data can still be received and the
|
|
||||||
* destination is not full. Otherwise, the string may be returned as is, without
|
|
||||||
* the separator.
|
|
||||||
*
|
|
||||||
* Return values :
|
|
||||||
* >0 : number of bytes read. Includes the separator if present before len or end.
|
|
||||||
* =0 : no separator before end found. <str> is left undefined.
|
|
||||||
* <0 : no more bytes readable because output is shut.
|
|
||||||
*
|
|
||||||
* The status of the corresponding buffer is not changed. The caller must call
|
|
||||||
* applet_skip_input() to update it.
|
|
||||||
*/
|
|
||||||
static inline int applet_getword(const struct appctx *appctx, char *str, int len, char sep)
|
|
||||||
{
|
|
||||||
const struct buffer *buf;
|
|
||||||
char *p;
|
|
||||||
size_t input, max = len;
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
ret = applet_may_get(appctx, 1);
|
|
||||||
if (ret <= 0)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
buf = &appctx->inbuf;
|
|
||||||
input = b_data(buf);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
struct stconn *sc = appctx_sc(appctx);
|
|
||||||
|
|
||||||
buf = sc_ob(sc);
|
|
||||||
input = co_data(sc_oc(sc));
|
|
||||||
}
|
|
||||||
|
|
||||||
if (max > input) {
|
|
||||||
max = input;
|
|
||||||
str[max-1] = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
p = b_head(buf);
|
|
||||||
ret = 0;
|
|
||||||
while (max) {
|
|
||||||
*str++ = *p;
|
|
||||||
ret++;
|
|
||||||
max--;
|
|
||||||
if (*p == sep)
|
|
||||||
goto out;
|
|
||||||
p = b_next(buf, p);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
if (ret < len && (ret < input || b_room(buf)) &&
|
|
||||||
!se_fl_test(appctx->sedesc, SE_FL_SHW))
|
|
||||||
ret = 0;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
struct stconn *sc = appctx_sc(appctx);
|
|
||||||
|
|
||||||
if (ret < len && (ret < input || channel_may_recv(sc_oc(sc))) &&
|
|
||||||
!(sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
|
|
||||||
ret = 0;
|
|
||||||
}
|
|
||||||
out:
|
|
||||||
if (max)
|
|
||||||
*str = 0;
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Gets one text block representing a line from the applet input buffer (see
|
|
||||||
* appet_get_inbuf).
|
|
||||||
*
|
|
||||||
* The '\n' is waited for as long as some data can still be received and the
|
|
||||||
* destination is not full. Otherwise, the string may be returned as is, without
|
|
||||||
* the '\n'.
|
|
||||||
*
|
|
||||||
* Return values :
|
|
||||||
* >0 : number of bytes read. Includes the \n if present before len or end.
|
|
||||||
* =0 : no '\n' before end found. <str> is left undefined.
|
|
||||||
* <0 : no more bytes readable because output is shut.
|
|
||||||
*
|
|
||||||
* The status of the corresponding buffer is not changed. The caller must call
|
|
||||||
* applet_skip_input() to update it.
|
|
||||||
*/
|
|
||||||
static inline int applet_getline(const struct appctx *appctx, char *str, int len)
|
|
||||||
{
|
|
||||||
return applet_getword(appctx, str, len, '\n');
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Gets one or two blocks of data at once from the applet input buffer (see appet_get_inbuf),
|
|
||||||
*
|
|
||||||
* Data are not copied.
|
|
||||||
*
|
|
||||||
* Return values :
|
|
||||||
* >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
|
|
||||||
* =0 : not enough data available. <blk*> are left undefined.
|
|
||||||
* <0 : no more bytes readable because output is shut.
|
|
||||||
*
|
|
||||||
* The status of the corresponding buffer is not changed. The caller must call
|
|
||||||
* applet_skip_input() to update it.
|
|
||||||
*/
|
|
||||||
static inline int applet_getblk_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
|
|
||||||
{
|
|
||||||
const struct buffer *buf;
|
|
||||||
size_t max;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = applet_may_get(appctx, 1);
|
|
||||||
if (ret <= 0)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
buf = &appctx->inbuf;
|
|
||||||
max = b_data(buf);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
struct stconn *sc = appctx_sc(appctx);
|
|
||||||
|
|
||||||
buf = sc_ob(sc);
|
|
||||||
max = co_data(sc_oc(sc));
|
|
||||||
}
|
|
||||||
|
|
||||||
return b_getblk_nc(buf, blk1, len1, blk2, len2, 0, max);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Gets one or two blocks of text representing a word from the applet input
|
|
||||||
* buffer (see appet_get_inbuf).
|
|
||||||
*
|
|
||||||
* Data are not copied. The separator is waited for as long as some data can
|
|
||||||
* still be received and the destination is not full. Otherwise, the string may
|
|
||||||
* be returned as is, without the separator.
|
|
||||||
*
|
|
||||||
* Return values :
|
|
||||||
* >0 : number of bytes read. Includes the separator if present before len or end.
|
|
||||||
* =0 : no separator before end found. <str> is left undefined.
|
|
||||||
* <0 : no more bytes readable because output is shut.
|
|
||||||
*
|
|
||||||
* The status of the corresponding buffer is not changed. The caller must call
|
|
||||||
* applet_skip_input() to update it.
|
|
||||||
*/
|
|
||||||
static inline int applet_getword_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2, char sep)
|
|
||||||
{
|
|
||||||
int ret;
|
|
||||||
size_t l;
|
|
||||||
|
|
||||||
ret = applet_getblk_nc(appctx, blk1, len1, blk2, len2);
|
|
||||||
if (unlikely(ret <= 0))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
for (l = 0; l < *len1 && (*blk1)[l] != sep; l++);
|
|
||||||
if (l < *len1 && (*blk1)[l] == sep) {
|
|
||||||
*len1 = l + 1;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ret >= 2) {
|
|
||||||
for (l = 0; l < *len2 && (*blk2)[l] != sep; l++);
|
|
||||||
if (l < *len2 && (*blk2)[l] == sep) {
|
|
||||||
*len2 = l + 1;
|
|
||||||
return 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If we have found no LF and the buffer is full or the SC is shut, then
|
|
||||||
* the resulting string is made of the concatenation of the pending
|
|
||||||
* blocks (1 or 2).
|
|
||||||
*/
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
|
||||||
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
struct stconn *sc = appctx_sc(appctx);
|
|
||||||
|
|
||||||
if (!channel_may_recv(sc_oc(sc)) || sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* No LF yet and not shut yet */
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/* Gets one or two blocks of text representing a line from the applet input
|
|
||||||
* buffer (see appet_get_inbuf).
|
|
||||||
*
|
|
||||||
* Data are not copied. The '\n' is waited for as long as some data can still be
|
|
||||||
* received and the destination is not full. Otherwise, the string may be
|
|
||||||
* returned as is, without the '\n'.
|
|
||||||
*
|
|
||||||
* Return values :
|
|
||||||
* >0 : number of bytes read. Includes the \n if present before len or end.
|
|
||||||
* =0 : no '\n' before end found. <str> is left undefined.
|
|
||||||
* <0 : no more bytes readable because output is shut.
|
|
||||||
*
|
|
||||||
* The status of the corresponding buffer is not changed. The caller must call
|
|
||||||
* applet_skip_input() to update it.
|
|
||||||
*/
|
|
||||||
static inline int applet_getline_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
|
|
||||||
{
|
|
||||||
return applet_getword_nc(appctx, blk1, len1, blk2, len2, '\n');
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_APPLET_H */
|
#endif /* _HAPROXY_APPLET_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|||||||
@ -46,8 +46,6 @@ int alloc_bind_address(struct sockaddr_storage **ss,
|
|||||||
struct server *srv, struct proxy *be,
|
struct server *srv, struct proxy *be,
|
||||||
struct stream *s);
|
struct stream *s);
|
||||||
|
|
||||||
int be_reuse_mode(const struct proxy *be, const struct server *srv);
|
|
||||||
|
|
||||||
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
|
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
|
||||||
struct session *sess,
|
struct session *sess,
|
||||||
struct sockaddr_storage *src,
|
struct sockaddr_storage *src,
|
||||||
@ -85,21 +83,10 @@ static inline int be_usable_srv(struct proxy *be)
|
|||||||
return be->srv_bck;
|
return be->srv_bck;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns true if <be> backend can be used as target to a switching rules. */
|
|
||||||
static inline int be_is_eligible(const struct proxy *be)
|
|
||||||
{
|
|
||||||
/* A disabled or unpublished backend cannot be selected for traffic.
|
|
||||||
* Note that STOPPED state is ignored as there is a risk of breaking
|
|
||||||
* requests during soft-stop.
|
|
||||||
*/
|
|
||||||
return !(be->flags & (PR_FL_DISABLED|PR_FL_BE_UNPUBLISHED));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* set the time of last session on the backend */
|
/* set the time of last session on the backend */
|
||||||
static inline void be_set_sess_last(struct proxy *be)
|
static inline void be_set_sess_last(struct proxy *be)
|
||||||
{
|
{
|
||||||
if (be->be_counters.shared.tg)
|
be->be_counters.last_sess = ns_to_sec(now_ns);
|
||||||
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This function returns non-zero if the designated server will be
|
/* This function returns non-zero if the designated server will be
|
||||||
@ -179,12 +166,6 @@ void set_backend_down(struct proxy *be);
|
|||||||
|
|
||||||
unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
|
unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
|
||||||
|
|
||||||
/* Returns true if connection reuse is supported by <be> backend. */
|
|
||||||
static inline int be_supports_conn_reuse(const struct proxy *be)
|
|
||||||
{
|
|
||||||
return be->mode == PR_MODE_HTTP || be->mode == PR_MODE_SPOP;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_BACKEND_H */
|
#endif /* _HAPROXY_BACKEND_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|||||||
@ -40,23 +40,6 @@
|
|||||||
#define DPRINTF(x...)
|
#define DPRINTF(x...)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Let's make DEBUG_STRESS equal to zero if not set or not valid, or to
|
|
||||||
* 1 if set. This way it is always set and should be easy to use in "if ()"
|
|
||||||
* statements without requiring ifdefs, while remaining compatible with
|
|
||||||
* "#if DEBUG_STRESS > 0". We also force DEBUG_STRICT and DEBUG_STRICT_ACTION
|
|
||||||
* when stressed.
|
|
||||||
*/
|
|
||||||
#if !defined(DEBUG_STRESS)
|
|
||||||
# define DEBUG_STRESS 0
|
|
||||||
#elif DEBUG_STRESS != 0
|
|
||||||
# undef DEBUG_STRESS
|
|
||||||
# define DEBUG_STRESS 1 // make sure comparison >0 always works
|
|
||||||
# undef DEBUG_STRICT
|
|
||||||
# define DEBUG_STRICT 2 // enable BUG_ON
|
|
||||||
# undef DEBUG_STRICT_ACTION
|
|
||||||
# define DEBUG_STRICT_ACTION 3 // enable crash on match
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
|
#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
|
||||||
|
|
||||||
/* First, let's try to handle some arch-specific crashing methods. We prefer
|
/* First, let's try to handle some arch-specific crashing methods. We prefer
|
||||||
@ -85,7 +68,7 @@
|
|||||||
#else // not x86
|
#else // not x86
|
||||||
|
|
||||||
/* generic implementation, causes a segfault */
|
/* generic implementation, causes a segfault */
|
||||||
static inline __attribute((always_inline,noreturn,unused)) void ha_crash_now(void)
|
static inline __attribute((always_inline)) void ha_crash_now(void)
|
||||||
{
|
{
|
||||||
#if __GNUC_PREREQ__(5, 0)
|
#if __GNUC_PREREQ__(5, 0)
|
||||||
#pragma GCC diagnostic push
|
#pragma GCC diagnostic push
|
||||||
@ -424,20 +407,6 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
|
|||||||
# define COUNT_IF_HOT(cond, ...) DISGUISE(cond)
|
# define COUNT_IF_HOT(cond, ...) DISGUISE(cond)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* turn BUG_ON_STRESS() into a real statement when DEBUG_STRESS is set,
|
|
||||||
* otherwise simply ignore it, at the risk of failing to notice if the
|
|
||||||
* condition would build at all. We don't really care if BUG_ON_STRESS
|
|
||||||
* doesn't always build, because it's meant to be used only in certain
|
|
||||||
* scenarios, possibly requiring certain combinations of options. We
|
|
||||||
* just want to be certain that the condition is not implemented at all
|
|
||||||
* when not used, so as to encourage developers to put a lot of them at
|
|
||||||
* zero cost.
|
|
||||||
*/
|
|
||||||
#if DEBUG_STRESS > 0
|
|
||||||
# define BUG_ON_STRESS(cond, ...) BUG_ON(cond, __VA_ARGS__)
|
|
||||||
#else
|
|
||||||
# define BUG_ON_STRESS(cond, ...) do { } while (0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
|
/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
|
||||||
#if defined(__GNUC__) && defined(__OPTIMIZE__)
|
#if defined(__GNUC__) && defined(__OPTIMIZE__)
|
||||||
@ -537,7 +506,7 @@ struct mem_stats {
|
|||||||
size_t size;
|
size_t size;
|
||||||
struct ha_caller caller;
|
struct ha_caller caller;
|
||||||
const void *extra; // extra info specific to this call (e.g. pool ptr)
|
const void *extra; // extra info specific to this call (e.g. pool ptr)
|
||||||
} ALIGNED(sizeof(void*));
|
} __attribute__((aligned(sizeof(void*))));
|
||||||
|
|
||||||
#undef calloc
|
#undef calloc
|
||||||
#define calloc(x,y) ({ \
|
#define calloc(x,y) ({ \
|
||||||
@ -651,172 +620,9 @@ struct mem_stats {
|
|||||||
_HA_ATOMIC_ADD(&_.size, __y); \
|
_HA_ATOMIC_ADD(&_.size, __y); \
|
||||||
strdup(__x); \
|
strdup(__x); \
|
||||||
})
|
})
|
||||||
|
|
||||||
#undef ha_aligned_alloc
|
|
||||||
#define ha_aligned_alloc(a,s) ({ \
|
|
||||||
size_t __a = (a); \
|
|
||||||
size_t __s = (s); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_MALLOC, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_HA_ATOMIC_ADD(&_.size, __s); \
|
|
||||||
_ha_aligned_alloc(__a, __s); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#undef ha_aligned_zalloc
|
|
||||||
#define ha_aligned_zalloc(a,s) ({ \
|
|
||||||
size_t __a = (a); \
|
|
||||||
size_t __s = (s); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_CALLOC, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_HA_ATOMIC_ADD(&_.size, __s); \
|
|
||||||
_ha_aligned_zalloc(__a, __s); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#undef ha_aligned_alloc_safe
|
|
||||||
#define ha_aligned_alloc_safe(a,s) ({ \
|
|
||||||
size_t __a = (a); \
|
|
||||||
size_t __s = (s); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_MALLOC, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_HA_ATOMIC_ADD(&_.size, __s); \
|
|
||||||
_ha_aligned_alloc_safe(__a, __s); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#undef ha_aligned_zalloc_safe
|
|
||||||
#define ha_aligned_zalloc_safe(a,s) ({ \
|
|
||||||
size_t __a = (a); \
|
|
||||||
size_t __s = (s); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_CALLOC, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_HA_ATOMIC_ADD(&_.size, __s); \
|
|
||||||
_ha_aligned_zalloc_safe(__a, __s); \
|
|
||||||
})
|
|
||||||
|
|
||||||
// Since the type is known, the .extra field will contain its name
|
|
||||||
#undef ha_aligned_alloc_typed
|
|
||||||
#define ha_aligned_alloc_typed(cnt,type) ({ \
|
|
||||||
size_t __a = __alignof__(type); \
|
|
||||||
size_t __s = ((size_t)cnt) * sizeof(type); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_MALLOC, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
.extra = #type, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_HA_ATOMIC_ADD(&_.size, __s); \
|
|
||||||
(type*)_ha_aligned_alloc(__a, __s); \
|
|
||||||
})
|
|
||||||
|
|
||||||
// Since the type is known, the .extra field will contain its name
|
|
||||||
#undef ha_aligned_zalloc_typed
|
|
||||||
#define ha_aligned_zalloc_typed(cnt,type) ({ \
|
|
||||||
size_t __a = __alignof__(type); \
|
|
||||||
size_t __s = ((size_t)cnt) * sizeof(type); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_CALLOC, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
.extra = #type, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_HA_ATOMIC_ADD(&_.size, __s); \
|
|
||||||
(type*)_ha_aligned_zalloc_safe(__a, __s); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#undef ha_aligned_free
|
|
||||||
#define ha_aligned_free(x) ({ \
|
|
||||||
typeof(x) __x = (x); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_FREE, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
if (__builtin_constant_p((x))) { \
|
|
||||||
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
|
|
||||||
} \
|
|
||||||
if (__x) \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_ha_aligned_free(__x); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#undef ha_aligned_free_size
|
|
||||||
#define ha_aligned_free_size(p,s) ({ \
|
|
||||||
void *__p = (p); size_t __s = (s); \
|
|
||||||
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
|
|
||||||
.caller = { \
|
|
||||||
.file = __FILE__, .line = __LINE__, \
|
|
||||||
.what = MEM_STATS_TYPE_FREE, \
|
|
||||||
.func = __func__, \
|
|
||||||
}, \
|
|
||||||
}; \
|
|
||||||
HA_WEAK(__start_mem_stats); \
|
|
||||||
HA_WEAK(__stop_mem_stats); \
|
|
||||||
if (__builtin_constant_p((p))) { \
|
|
||||||
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
|
|
||||||
} \
|
|
||||||
if (__p) { \
|
|
||||||
_HA_ATOMIC_INC(&_.calls); \
|
|
||||||
_HA_ATOMIC_ADD(&_.size, __s); \
|
|
||||||
} \
|
|
||||||
_ha_aligned_free(__p); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#else // DEBUG_MEM_STATS
|
#else // DEBUG_MEM_STATS
|
||||||
|
|
||||||
#define will_free(x, y) do { } while (0)
|
#define will_free(x, y) do { } while (0)
|
||||||
#define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
|
|
||||||
#define ha_aligned_zalloc(a,s) _ha_aligned_zalloc(a, s)
|
|
||||||
#define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
|
|
||||||
#define ha_aligned_zalloc_safe(a,s) _ha_aligned_zalloc_safe(a, s)
|
|
||||||
#define ha_aligned_alloc_typed(cnt,type) ((type*)_ha_aligned_alloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
|
|
||||||
#define ha_aligned_zalloc_typed(cnt,type) ((type*)_ha_aligned_zalloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
|
|
||||||
#define ha_aligned_free(p) _ha_aligned_free(p)
|
|
||||||
#define ha_aligned_free_size(p,s) _ha_aligned_free(p)
|
|
||||||
|
|
||||||
#endif /* DEBUG_MEM_STATS*/
|
#endif /* DEBUG_MEM_STATS*/
|
||||||
|
|
||||||
|
|||||||
46
include/haproxy/cbuf-t.h
Normal file
46
include/haproxy/cbuf-t.h
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
/*
|
||||||
|
* include/haprox/cbuf-t.h
|
||||||
|
* This file contains definition for circular buffers.
|
||||||
|
*
|
||||||
|
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
|
||||||
|
*
|
||||||
|
* This library is free software; you can redistribute it and/or
|
||||||
|
* modify it under the terms of the GNU Lesser General Public
|
||||||
|
* License as published by the Free Software Foundation, version 2.1
|
||||||
|
* exclusively.
|
||||||
|
*
|
||||||
|
* This library is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
* Lesser General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Lesser General Public
|
||||||
|
* License along with this library; if not, write to the Free Software
|
||||||
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _HAPROXY_CBUF_T_H
|
||||||
|
#define _HAPROXY_CBUF_T_H
|
||||||
|
#ifdef USE_QUIC
|
||||||
|
#ifndef USE_OPENSSL
|
||||||
|
#error "Must define USE_OPENSSL"
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <haproxy/list-t.h>
|
||||||
|
|
||||||
|
extern struct pool_head *pool_head_cbuf;
|
||||||
|
|
||||||
|
struct cbuf {
|
||||||
|
/* buffer */
|
||||||
|
unsigned char *buf;
|
||||||
|
/* buffer size */
|
||||||
|
size_t sz;
|
||||||
|
/* Writer index */
|
||||||
|
size_t wr;
|
||||||
|
/* Reader index */
|
||||||
|
size_t rd;
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif /* _HAPROXY_CBUF_T_H */
|
||||||
136
include/haproxy/cbuf.h
Normal file
136
include/haproxy/cbuf.h
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
/*
|
||||||
|
* include/haprox/cbuf.h
|
||||||
|
* This file contains definitions and prototypes for circular buffers.
|
||||||
|
* Inspired from Linux circular buffers (include/linux/circ_buf.h).
|
||||||
|
*
|
||||||
|
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
|
||||||
|
*
|
||||||
|
* This library is free software; you can redistribute it and/or
|
||||||
|
* modify it under the terms of the GNU Lesser General Public
|
||||||
|
* License as published by the Free Software Foundation, version 2.1
|
||||||
|
* exclusively.
|
||||||
|
*
|
||||||
|
* This library is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
* Lesser General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Lesser General Public
|
||||||
|
* License along with this library; if not, write to the Free Software
|
||||||
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _HAPROXY_CBUF_H
|
||||||
|
#define _HAPROXY_CBUF_H
|
||||||
|
#ifdef USE_QUIC
|
||||||
|
#ifndef USE_OPENSSL
|
||||||
|
#error "Must define USE_OPENSSL"
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <haproxy/atomic.h>
|
||||||
|
#include <haproxy/list.h>
|
||||||
|
#include <haproxy/cbuf-t.h>
|
||||||
|
|
||||||
|
struct cbuf *cbuf_new(unsigned char *buf, size_t sz);
|
||||||
|
void cbuf_free(struct cbuf *cbuf);
|
||||||
|
|
||||||
|
/* Amount of data between <rd> and <wr> */
|
||||||
|
#define CBUF_DATA(wr, rd, size) (((wr) - (rd)) & ((size) - 1))
|
||||||
|
|
||||||
|
/* Return the writer position in <cbuf>.
|
||||||
|
* To be used only by the writer!
|
||||||
|
*/
|
||||||
|
static inline unsigned char *cb_wr(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
return cbuf->buf + cbuf->wr;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Reset the reader index.
|
||||||
|
* To be used by a reader!
|
||||||
|
*/
|
||||||
|
static inline void cb_rd_reset(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
cbuf->rd = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Reset the writer index.
|
||||||
|
* To be used by a writer!
|
||||||
|
*/
|
||||||
|
static inline void cb_wr_reset(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
cbuf->wr = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Increase <cbuf> circular buffer data by <count>.
|
||||||
|
* To be used by a writer!
|
||||||
|
*/
|
||||||
|
static inline void cb_add(struct cbuf *cbuf, size_t count)
|
||||||
|
{
|
||||||
|
cbuf->wr = (cbuf->wr + count) & (cbuf->sz - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return the reader position in <cbuf>.
|
||||||
|
* To be used only by the reader!
|
||||||
|
*/
|
||||||
|
static inline unsigned char *cb_rd(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
return cbuf->buf + cbuf->rd;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Skip <count> byte in <cbuf> circular buffer.
|
||||||
|
* To be used by a reader!
|
||||||
|
*/
|
||||||
|
static inline void cb_del(struct cbuf *cbuf, size_t count)
|
||||||
|
{
|
||||||
|
cbuf->rd = (cbuf->rd + count) & (cbuf->sz - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return the amount of data left in <cbuf>.
|
||||||
|
* To be used only by the writer!
|
||||||
|
*/
|
||||||
|
static inline size_t cb_data(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
size_t rd;
|
||||||
|
|
||||||
|
rd = HA_ATOMIC_LOAD(&cbuf->rd);
|
||||||
|
return CBUF_DATA(cbuf->wr, rd, cbuf->sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return the amount of room left in <cbuf> minus 1 to distinguish
|
||||||
|
* the case where the buffer is full from the case where is is empty
|
||||||
|
* To be used only by the write!
|
||||||
|
*/
|
||||||
|
static inline size_t cb_room(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
size_t rd;
|
||||||
|
|
||||||
|
rd = HA_ATOMIC_LOAD(&cbuf->rd);
|
||||||
|
return CBUF_DATA(rd, cbuf->wr + 1, cbuf->sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return the amount of contiguous data left in <cbuf>.
|
||||||
|
* To be used only by the reader!
|
||||||
|
*/
|
||||||
|
static inline size_t cb_contig_data(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
size_t end, n;
|
||||||
|
|
||||||
|
end = cbuf->sz - cbuf->rd;
|
||||||
|
n = (HA_ATOMIC_LOAD(&cbuf->wr) + end) & (cbuf->sz - 1);
|
||||||
|
return n < end ? n : end;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return the amount of contiguous space left in <cbuf>.
|
||||||
|
* To be used only by the writer!
|
||||||
|
*/
|
||||||
|
static inline size_t cb_contig_space(struct cbuf *cbuf)
|
||||||
|
{
|
||||||
|
size_t end, n;
|
||||||
|
|
||||||
|
end = cbuf->sz - 1 - cbuf->wr;
|
||||||
|
n = (HA_ATOMIC_LOAD(&cbuf->rd) + end) & (cbuf->sz - 1);
|
||||||
|
return n <= end ? n : end + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* _HAPROXY_CBUF_H */
|
||||||
@ -54,8 +54,6 @@ enum cond_predicate {
|
|||||||
CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
|
CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
|
||||||
CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
|
CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
|
||||||
CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
|
CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
|
||||||
CFG_PRED_AWSLC_API_ATLEAST, // "awslc_api_atleast"
|
|
||||||
CFG_PRED_AWSLC_API_BEFORE, // "awslc_api_before"
|
|
||||||
CFG_PRED_ENABLED, // "enabled"
|
CFG_PRED_ENABLED, // "enabled"
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -140,7 +140,7 @@ int warnif_misplaced_tcp_req_sess(struct proxy *proxy, const char *file, int lin
|
|||||||
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||||
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||||
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||||
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, char **err);
|
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
|
||||||
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
|
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
|
||||||
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
|
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
|
||||||
int too_many_args(int maxarg, char **args, char **msg, int *err_code);
|
int too_many_args(int maxarg, char **args, char **msg, int *err_code);
|
||||||
|
|||||||
@ -204,6 +204,7 @@ struct channel {
|
|||||||
unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
|
unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
|
||||||
unsigned char xfer_large; /* number of consecutive large xfers */
|
unsigned char xfer_large; /* number of consecutive large xfers */
|
||||||
unsigned char xfer_small; /* number of consecutive small xfers */
|
unsigned char xfer_small; /* number of consecutive small xfers */
|
||||||
|
unsigned long long total; /* total data read */
|
||||||
int analyse_exp; /* expiration date for current analysers (if set) */
|
int analyse_exp; /* expiration date for current analysers (if set) */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -323,6 +323,7 @@ static inline void channel_init(struct channel *chn)
|
|||||||
chn->to_forward = 0;
|
chn->to_forward = 0;
|
||||||
chn->last_read = now_ms;
|
chn->last_read = now_ms;
|
||||||
chn->xfer_small = chn->xfer_large = 0;
|
chn->xfer_small = chn->xfer_large = 0;
|
||||||
|
chn->total = 0;
|
||||||
chn->analysers = 0;
|
chn->analysers = 0;
|
||||||
chn->flags = 0;
|
chn->flags = 0;
|
||||||
chn->output = 0;
|
chn->output = 0;
|
||||||
@ -376,6 +377,7 @@ static inline void channel_add_input(struct channel *chn, unsigned int len)
|
|||||||
c_adv(chn, fwd);
|
c_adv(chn, fwd);
|
||||||
}
|
}
|
||||||
/* notify that some data was read */
|
/* notify that some data was read */
|
||||||
|
chn->total += len;
|
||||||
chn->flags |= CF_READ_EVENT;
|
chn->flags |= CF_READ_EVENT;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -47,7 +47,6 @@
|
|||||||
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */
|
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */
|
||||||
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
|
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
|
||||||
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
|
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
|
||||||
#define APPCTX_CLI_ST1_YIELD (1 << 6) /* forced yield between commands */
|
|
||||||
|
|
||||||
#define CLI_PREFIX_KW_NB 5
|
#define CLI_PREFIX_KW_NB 5
|
||||||
#define CLI_MAX_MATCHES 5
|
#define CLI_MAX_MATCHES 5
|
||||||
|
|||||||
@ -28,7 +28,7 @@
|
|||||||
extern struct timeval start_date; /* the process's start date in wall-clock time */
|
extern struct timeval start_date; /* the process's start date in wall-clock time */
|
||||||
extern struct timeval ready_date; /* date when the process was considered ready */
|
extern struct timeval ready_date; /* date when the process was considered ready */
|
||||||
extern ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
|
extern ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
|
||||||
extern volatile ullong *global_now_ns;/* common monotonic date between all threads, in ns (wraps every 585 yr) */
|
extern volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
|
||||||
|
|
||||||
extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
|
extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
|
||||||
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
|
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
|
||||||
@ -49,8 +49,6 @@ uint clock_report_idle(void);
|
|||||||
void clock_leaving_poll(int timeout, int interrupted);
|
void clock_leaving_poll(int timeout, int interrupted);
|
||||||
void clock_entering_poll(void);
|
void clock_entering_poll(void);
|
||||||
void clock_adjust_now_offset(void);
|
void clock_adjust_now_offset(void);
|
||||||
void clock_set_now_offset(llong ofs);
|
|
||||||
llong clock_get_now_offset(void);
|
|
||||||
|
|
||||||
static inline void clock_update_date(int max_wait, int interrupted)
|
static inline void clock_update_date(int max_wait, int interrupted)
|
||||||
{
|
{
|
||||||
|
|||||||
@ -94,21 +94,11 @@ typedef struct { } empty_t;
|
|||||||
# endif
|
# endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* unsafe ones for use with constant macros needed in initializers */
|
|
||||||
#ifndef _MIN
|
|
||||||
#define _MIN(a, b) ((a < b) ? a : b)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef _MAX
|
|
||||||
#define _MAX(a, b) ((a > b) ? a : b)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* safe versions for use anywhere except in initializers */
|
|
||||||
#ifndef MIN
|
#ifndef MIN
|
||||||
#define MIN(a, b) ({ \
|
#define MIN(a, b) ({ \
|
||||||
typeof(a) _a = (a); \
|
typeof(a) _a = (a); \
|
||||||
typeof(a) _b = (b); \
|
typeof(a) _b = (b); \
|
||||||
_MIN(_a, _b); \
|
((_a < _b) ? _a : _b); \
|
||||||
})
|
})
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -116,15 +106,10 @@ typedef struct { } empty_t;
|
|||||||
#define MAX(a, b) ({ \
|
#define MAX(a, b) ({ \
|
||||||
typeof(a) _a = (a); \
|
typeof(a) _a = (a); \
|
||||||
typeof(a) _b = (b); \
|
typeof(a) _b = (b); \
|
||||||
_MAX(_a, _b); \
|
((_a > _b) ? _a : _b); \
|
||||||
})
|
})
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* always set a _POSIX_VERSION if there isn't any, in order to ease compares */
|
|
||||||
#ifndef _POSIX_VERSION
|
|
||||||
# define _POSIX_VERSION 0
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* this is for libc5 for example */
|
/* this is for libc5 for example */
|
||||||
#ifndef TCP_NODELAY
|
#ifndef TCP_NODELAY
|
||||||
#define TCP_NODELAY 1
|
#define TCP_NODELAY 1
|
||||||
|
|||||||
@ -31,23 +31,6 @@
|
|||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* DEFVAL() returns either the second argument as-is, or <def> if absent. This
|
|
||||||
* is for use in macros arguments.
|
|
||||||
*/
|
|
||||||
#define DEFVAL(_def,...) _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
|
|
||||||
|
|
||||||
/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
|
|
||||||
* use in macros arguments.
|
|
||||||
*/
|
|
||||||
#define DEFNULL(...) DEFVAL(NULL, ##__VA_ARGS__)
|
|
||||||
|
|
||||||
/* DEFZERO() returns either the argument as-is, or 0 if absent. This is for
|
|
||||||
* use in macros arguments.
|
|
||||||
*/
|
|
||||||
#define DEFZERO(...) DEFVAL(0, ##__VA_ARGS__)
|
|
||||||
|
|
||||||
#define _FIRST_ARG(a, b, ...) b
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Gcc before 3.0 needs [0] to declare a variable-size array
|
* Gcc before 3.0 needs [0] to declare a variable-size array
|
||||||
*/
|
*/
|
||||||
@ -367,7 +350,7 @@
|
|||||||
* <type> which has its member <name> stored at address <ptr>.
|
* <type> which has its member <name> stored at address <ptr>.
|
||||||
*/
|
*/
|
||||||
#ifndef container_of
|
#ifndef container_of
|
||||||
#define container_of(ptr, type, name) ((type *)(((char *)(ptr)) - offsetof(type, name)))
|
#define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* returns a pointer to the structure of type <type> which has its member <name>
|
/* returns a pointer to the structure of type <type> which has its member <name>
|
||||||
@ -376,7 +359,7 @@
|
|||||||
#ifndef container_of_safe
|
#ifndef container_of_safe
|
||||||
#define container_of_safe(ptr, type, name) \
|
#define container_of_safe(ptr, type, name) \
|
||||||
({ void *__p = (ptr); \
|
({ void *__p = (ptr); \
|
||||||
__p ? (type *)((char *)__p - offsetof(type, name)) : (type *)0; \
|
__p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
|
||||||
})
|
})
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -432,13 +415,6 @@
|
|||||||
* for multi_threading, see THREAD_PAD() below. *
|
* for multi_threading, see THREAD_PAD() below. *
|
||||||
\*****************************************************************************/
|
\*****************************************************************************/
|
||||||
|
|
||||||
/* Cache line size for alignment purposes. This value is incorrect for some
|
|
||||||
* Apple CPUs which have 128 bytes cache lines.
|
|
||||||
*/
|
|
||||||
#ifndef CACHELINE_SIZE
|
|
||||||
#define CACHELINE_SIZE 64
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* sets alignment for current field or variable */
|
/* sets alignment for current field or variable */
|
||||||
#ifndef ALIGNED
|
#ifndef ALIGNED
|
||||||
#define ALIGNED(x) __attribute__((aligned(x)))
|
#define ALIGNED(x) __attribute__((aligned(x)))
|
||||||
@ -462,12 +438,12 @@
|
|||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Sets alignment for current field or variable only when threads are enabled.
|
/* sets alignment for current field or variable only when threads are enabled.
|
||||||
* When no parameters are provided, we align to the cache line size.
|
* Typically used to respect cache line alignment to avoid false sharing.
|
||||||
*/
|
*/
|
||||||
#ifndef THREAD_ALIGNED
|
#ifndef THREAD_ALIGNED
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
#define THREAD_ALIGNED(...) ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
|
#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
|
||||||
#else
|
#else
|
||||||
#define THREAD_ALIGNED(x)
|
#define THREAD_ALIGNED(x)
|
||||||
#endif
|
#endif
|
||||||
@ -500,44 +476,32 @@
|
|||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Add an optional alignment for next fields in a structure, only when threads
|
/* add an optional alignment for next fields in a structure, only when threads
|
||||||
* are enabled. When no parameters are provided, we align to the cache line size.
|
* are enabled. Typically used to respect cache line alignment to avoid false
|
||||||
|
* sharing.
|
||||||
*/
|
*/
|
||||||
#ifndef THREAD_ALIGN
|
#ifndef THREAD_ALIGN
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
#define THREAD_ALIGN(...) union { } ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
|
#define THREAD_ALIGN(x) union { } ALIGNED(x)
|
||||||
#else
|
#else
|
||||||
#define THREAD_ALIGN(x)
|
#define THREAD_ALIGN(x)
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* add padding of the specified size */
|
|
||||||
#define _PAD(x,l) char __pad_##l[x]
|
|
||||||
|
|
||||||
/* add optional padding of the specified size between fields in a structure,
|
/* add optional padding of the specified size between fields in a structure,
|
||||||
* only when threads are enabled. This is used to avoid false sharing of cache
|
* only when threads are enabled. This is used to avoid false sharing of cache
|
||||||
* lines for dynamically allocated structures which cannot guarantee alignment.
|
* lines for dynamically allocated structures which cannot guarantee alignment.
|
||||||
*/
|
*/
|
||||||
#ifndef THREAD_PAD
|
#ifndef THREAD_PAD
|
||||||
# ifdef USE_THREAD
|
# ifdef USE_THREAD
|
||||||
# define _THREAD_PAD(x,l) _PAD(x, l)
|
# define __THREAD_PAD(x,l) char __pad_##l[x]
|
||||||
|
# define _THREAD_PAD(x,l) __THREAD_PAD(x, l)
|
||||||
# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
|
# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
|
||||||
# else
|
# else
|
||||||
# define THREAD_PAD(x)
|
# define THREAD_PAD(x)
|
||||||
# endif
|
# endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* add mandatory padding of the specified size between fields in a structure,
|
|
||||||
* This is used to avoid false sharing of cache lines for dynamically allocated
|
|
||||||
* structures which cannot guarantee alignment, or to ensure that the size of
|
|
||||||
* the struct remains consistent on architectures with different alignment
|
|
||||||
* constraints
|
|
||||||
*/
|
|
||||||
#ifndef ALWAYS_PAD
|
|
||||||
# define _ALWAYS_PAD(x,l) _PAD(x, l)
|
|
||||||
# define ALWAYS_PAD(x) _ALWAYS_PAD(x, __LINE__)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
|
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
|
||||||
* to __thread when threads are enabled or empty when disabled.
|
* to __thread when threads are enabled or empty when disabled.
|
||||||
*/
|
*/
|
||||||
|
|||||||
@ -28,7 +28,7 @@
|
|||||||
#include <netinet/ip.h>
|
#include <netinet/ip.h>
|
||||||
#include <netinet/ip6.h>
|
#include <netinet/ip6.h>
|
||||||
|
|
||||||
#include <import/cebtree.h>
|
#include <import/ebtree-t.h>
|
||||||
#include <import/ist.h>
|
#include <import/ist.h>
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
@ -68,50 +68,6 @@ struct ssl_sock_ctx;
|
|||||||
* conn_cond_update_polling().
|
* conn_cond_update_polling().
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/* A bit of explanation is required for backend connection reuse. A connection
|
|
||||||
* may be shared between multiple streams of the same thread (e.g. h2, fcgi,
|
|
||||||
* quic) and may be reused by subsequent streams of a different thread if it
|
|
||||||
* is totally idle (i.e. not used at all). In order to permit other streams
|
|
||||||
* to find a connection, it has to appear in lists and/or trees that reflect
|
|
||||||
* its current state. If the connection is full and cannot be shared anymore,
|
|
||||||
* it is not in any of such places. The various states are the following:
|
|
||||||
*
|
|
||||||
* - private: a private connection is not visible to other threads. It is
|
|
||||||
* attached via its <idle_list> member to the <conn_list> head of a
|
|
||||||
* sess_priv_conns struct specific to the server, itself attached to the
|
|
||||||
* session. Only other streams of the same session may find this connection.
|
|
||||||
* Such connections include totally idle connections as well as connections
|
|
||||||
* with available slots left. The <hash_node> part is still used to store
|
|
||||||
* the hash key but the tree node part is otherwise left unused.
|
|
||||||
*
|
|
||||||
* - avail: an available connection is a connection that has at least one
|
|
||||||
* stream in use and at least one slot available for a new stream. Such a
|
|
||||||
* connection is indexed in the server's <avail_conns> member based on the
|
|
||||||
* key of the hash_node. It cannot be used by other threads, and is not
|
|
||||||
* present in the server's <idle_conn_list>, so its <idle_list> member is
|
|
||||||
* always empty. Since this connection is in use by a single thread and
|
|
||||||
* cannot be taken over, it doesn't require any locking to enter/leave the
|
|
||||||
* tree.
|
|
||||||
*
|
|
||||||
* - safe: a safe connection is an idle connection that has proven that it
|
|
||||||
* could reliably be reused. Such a connection may be taken over at any
|
|
||||||
* instant by other threads, and must only be manipulated under the server's
|
|
||||||
* <idle_lock>. It is indexed in the server's <safe_conns> member based on
|
|
||||||
* the key of the hash_node. It is attached to the server's <idle_conn_list>
|
|
||||||
* via its <idle_list> member. It may be purged after too long inactivity,
|
|
||||||
* though the thread responsible for doing this will first take it over. Such
|
|
||||||
* a connection has (conn->flags & CO_FL_LIST_MASK) = CO_FL_SAFE_LIST.
|
|
||||||
*
|
|
||||||
* - idle: a purely idle connection has not yet proven that it could reliably
|
|
||||||
* be reused. Such a connection may be taken over at any instant by other
|
|
||||||
* threads, and must only be manipulated under the server's <idle_lock>. It
|
|
||||||
* is indexed in the server's <idle_conns> member based on the key of the
|
|
||||||
* hash_node. It is attached to the server's <idle_conn_list> via its
|
|
||||||
* <idle_list> member. It may be purged after too long inactivity, though the
|
|
||||||
* thread responsible for doing this will first take it over. Such a
|
|
||||||
* connection has (conn->flags & CO_FL_LIST_MASK) = CO_FL_IDLE_LIST.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* flags for use in connection->flags. Please also update the conn_show_flags()
|
/* flags for use in connection->flags. Please also update the conn_show_flags()
|
||||||
* function below in case of changes.
|
* function below in case of changes.
|
||||||
*/
|
*/
|
||||||
@ -144,8 +100,9 @@ enum {
|
|||||||
*/
|
*/
|
||||||
CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
|
CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
|
||||||
|
|
||||||
CO_FL_WANT_SPLICING = 0x00001000, /* we wish to use splicing on the connection when possible */
|
/* These flags are used to report whether the from/to addresses are set or not */
|
||||||
CO_FL_SSL_NO_CACHED_INFO = 0x00002000, /* Don't use any cached information when creating a new SSL connection */
|
/* unused: 0x00001000 */
|
||||||
|
/* unused: 0x00002000 */
|
||||||
|
|
||||||
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
|
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
|
||||||
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
|
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
|
||||||
@ -212,13 +169,13 @@ static forceinline char *conn_show_flags(char *buf, size_t len, const char *deli
|
|||||||
/* flags */
|
/* flags */
|
||||||
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
|
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
|
||||||
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
|
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
|
||||||
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_SSL_NO_CACHED_INFO, _(CO_FL_EARLY_SSL_HS,
|
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_EARLY_SSL_HS,
|
||||||
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
|
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
|
||||||
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
|
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
|
||||||
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
|
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
|
||||||
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
|
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
|
||||||
_(CO_FL_XPRT_TRACKED
|
_(CO_FL_XPRT_TRACKED
|
||||||
)))))))))))))))))))))))))))));
|
))))))))))))))))))))))))))));
|
||||||
/* epilogue */
|
/* epilogue */
|
||||||
_(~0U);
|
_(~0U);
|
||||||
return buf;
|
return buf;
|
||||||
@ -329,7 +286,6 @@ enum {
|
|||||||
CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */
|
CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */
|
||||||
CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */
|
CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */
|
||||||
CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */
|
CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */
|
||||||
CO_RFL_TRY_HARDER = 0x0040, /* Try to read till READ0 even on short reads */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */
|
/* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */
|
||||||
@ -433,24 +389,14 @@ union conn_handle {
|
|||||||
int fd; /* file descriptor, for regular sockets (CO_FL_FDLESS=0) */
|
int fd; /* file descriptor, for regular sockets (CO_FL_FDLESS=0) */
|
||||||
};
|
};
|
||||||
|
|
||||||
enum xprt_capabilities {
|
|
||||||
XPRT_CAN_SPLICE,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum xprt_splice_cap {
|
|
||||||
XPRT_CONN_CAN_NOT_SPLICE, /* This connection can't, and won't ever be able to splice */
|
|
||||||
XPRT_CONN_COULD_SPLICE, /* This connection can't splice, but may later */
|
|
||||||
XPRT_CONN_CAN_SPLICE /* This connection can splice */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* xprt_ops describes transport-layer operations for a connection. They
|
/* xprt_ops describes transport-layer operations for a connection. They
|
||||||
* generally run over a socket-based control layer, but not always. Some
|
* generally run over a socket-based control layer, but not always. Some
|
||||||
* of them are used for data transfer with the upper layer (rcv_*, snd_*)
|
* of them are used for data transfer with the upper layer (rcv_*, snd_*)
|
||||||
* and the other ones are used to setup and release the transport layer.
|
* and the other ones are used to setup and release the transport layer.
|
||||||
*/
|
*/
|
||||||
struct xprt_ops {
|
struct xprt_ops {
|
||||||
size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, void *msg_control, size_t *msg_controllen, int flags); /* recv callback */
|
size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags); /* recv callback */
|
||||||
size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, void *msg_control, size_t msg_controllen, int flags); /* send callback */
|
size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags); /* send callback */
|
||||||
int (*rcv_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */
|
int (*rcv_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */
|
||||||
int (*snd_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* send-to-pipe callback */
|
int (*snd_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* send-to-pipe callback */
|
||||||
void (*shutr)(struct connection *conn, void *xprt_ctx, int); /* shutr function */
|
void (*shutr)(struct connection *conn, void *xprt_ctx, int); /* shutr function */
|
||||||
@ -474,12 +420,6 @@ struct xprt_ops {
|
|||||||
struct ssl_sock_ctx *(*get_ssl_sock_ctx)(struct connection *); /* retrieve the ssl_sock_ctx in use, or NULL if none */
|
struct ssl_sock_ctx *(*get_ssl_sock_ctx)(struct connection *); /* retrieve the ssl_sock_ctx in use, or NULL if none */
|
||||||
int (*show_fd)(struct buffer *, const struct connection *, const void *ctx); /* append some data about xprt for "show fd"; returns non-zero if suspicious */
|
int (*show_fd)(struct buffer *, const struct connection *, const void *ctx); /* append some data about xprt for "show fd"; returns non-zero if suspicious */
|
||||||
void (*dump_info)(struct buffer *, const struct connection *);
|
void (*dump_info)(struct buffer *, const struct connection *);
|
||||||
/*
|
|
||||||
* Returns the value for various capabilities.
|
|
||||||
* Returns 0 if the capability is known, with the actual value in arg,
|
|
||||||
* or -1 otherwise
|
|
||||||
*/
|
|
||||||
int (*get_capability)(struct connection *connection, void *xprt_ctx, enum xprt_capabilities, void *arg);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* mux_ops describes the mux operations, which are to be performed at the
|
/* mux_ops describes the mux operations, which are to be performed at the
|
||||||
@ -509,6 +449,8 @@ struct mux_ops {
|
|||||||
int (*unsubscribe)(struct stconn *sc, int event_type, struct wait_event *es); /* Unsubscribe <es> from events */
|
int (*unsubscribe)(struct stconn *sc, int event_type, struct wait_event *es); /* Unsubscribe <es> from events */
|
||||||
int (*sctl)(struct stconn *sc, enum mux_sctl_type mux_sctl, void *arg); /* Provides information about the mux stream */
|
int (*sctl)(struct stconn *sc, enum mux_sctl_type mux_sctl, void *arg); /* Provides information about the mux stream */
|
||||||
int (*avail_streams)(struct connection *conn); /* Returns the number of streams still available for a connection */
|
int (*avail_streams)(struct connection *conn); /* Returns the number of streams still available for a connection */
|
||||||
|
int (*avail_streams_bidi)(struct connection *conn); /* Returns the number of bidirectional streams still available for a connection */
|
||||||
|
int (*avail_streams_uni)(struct connection *conn); /* Returns the number of unidirectional streams still available for a connection */
|
||||||
int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */
|
int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */
|
||||||
void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */
|
void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */
|
||||||
int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */
|
int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */
|
||||||
@ -567,7 +509,7 @@ enum conn_hash_params_t {
|
|||||||
#define CONN_HASH_PARAMS_TYPE_COUNT 7
|
#define CONN_HASH_PARAMS_TYPE_COUNT 7
|
||||||
|
|
||||||
#define CONN_HASH_PAYLOAD_LEN \
|
#define CONN_HASH_PAYLOAD_LEN \
|
||||||
(((sizeof(((struct conn_hash_node *)0)->key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
|
(((sizeof(((struct conn_hash_node *)0)->node.key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
|
||||||
|
|
||||||
#define CONN_HASH_GET_PAYLOAD(hash) \
|
#define CONN_HASH_GET_PAYLOAD(hash) \
|
||||||
(((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
|
(((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
|
||||||
@ -599,14 +541,6 @@ struct conn_tlv_list {
|
|||||||
} __attribute__((packed));
|
} __attribute__((packed));
|
||||||
|
|
||||||
|
|
||||||
/* node for backend connection in the idle trees for http-reuse
|
|
||||||
* A connection is identified by a hash generated from its specific parameters
|
|
||||||
*/
|
|
||||||
struct conn_hash_node {
|
|
||||||
struct ceb_node node; /* indexes the hashing key for safe/idle/avail */
|
|
||||||
uint64_t key; /* the hashing key, also used by session-owned */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* This structure describes a connection with its methods and data.
|
/* This structure describes a connection with its methods and data.
|
||||||
* A connection may be performed to proxy or server via a local or remote
|
* A connection may be performed to proxy or server via a local or remote
|
||||||
* socket, and can also be made to an internal applet. It can support
|
* socket, and can also be made to an internal applet. It can support
|
||||||
@ -631,14 +565,12 @@ struct connection {
|
|||||||
/* second cache line */
|
/* second cache line */
|
||||||
struct wait_event *subs; /* Task to wake when awaited events are ready */
|
struct wait_event *subs; /* Task to wake when awaited events are ready */
|
||||||
union {
|
union {
|
||||||
/* Backend connections only */
|
struct list idle_list; /* list element for idle connection in server idle list */
|
||||||
struct {
|
struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
|
||||||
struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
|
};
|
||||||
struct list idle_list; /* list element for idle connection in server idle list */
|
union {
|
||||||
struct list sess_el; /* used by private connections, list elem into session */
|
struct list sess_el; /* used by private backend conns, list elem into session */
|
||||||
};
|
struct list stopping_list; /* used by frontend conns, attach point in mux stopping list */
|
||||||
/* Frontend connections only */
|
|
||||||
struct list stopping_list; /* attach point in mux stopping list */
|
|
||||||
};
|
};
|
||||||
union conn_handle handle; /* connection handle at the socket layer */
|
union conn_handle handle; /* connection handle at the socket layer */
|
||||||
const struct netns_entry *proxy_netns;
|
const struct netns_entry *proxy_netns;
|
||||||
@ -652,7 +584,7 @@ struct connection {
|
|||||||
/* used to identify a backend connection for http-reuse,
|
/* used to identify a backend connection for http-reuse,
|
||||||
* thus only present if conn.target is of type OBJ_TYPE_SERVER
|
* thus only present if conn.target is of type OBJ_TYPE_SERVER
|
||||||
*/
|
*/
|
||||||
struct conn_hash_node hash_node;
|
struct conn_hash_node *hash_node;
|
||||||
|
|
||||||
/* Members used if connection must be reversed. */
|
/* Members used if connection must be reversed. */
|
||||||
struct {
|
struct {
|
||||||
@ -660,18 +592,24 @@ struct connection {
|
|||||||
struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
|
struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
|
||||||
} reverse;
|
} reverse;
|
||||||
|
|
||||||
uint64_t sni_hash; /* Hash of the SNI. Used to cache the TLS session and try to reuse it. set to 0 is there is no SNI */
|
|
||||||
uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */
|
uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */
|
||||||
uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */
|
uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */
|
||||||
uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */
|
uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* node for backend connection in the idle trees for http-reuse
|
||||||
|
* A connection is identified by a hash generated from its specific parameters
|
||||||
|
*/
|
||||||
|
struct conn_hash_node {
|
||||||
|
struct eb64_node node; /* contains the hashing key */
|
||||||
|
struct connection *conn; /* connection owner of the node */
|
||||||
|
};
|
||||||
|
|
||||||
struct mux_proto_list {
|
struct mux_proto_list {
|
||||||
const struct ist token; /* token name and length. Empty is catch-all */
|
const struct ist token; /* token name and length. Empty is catch-all */
|
||||||
enum proto_proxy_mode mode;
|
enum proto_proxy_mode mode;
|
||||||
enum proto_proxy_side side;
|
enum proto_proxy_side side;
|
||||||
const struct mux_ops *mux;
|
const struct mux_ops *mux;
|
||||||
const char *alpn; /* Default alpn to set by default when the mux protocol is forced (optional, in binary form) */
|
|
||||||
struct list list;
|
struct list list;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -795,7 +733,7 @@ struct idle_conns {
|
|||||||
struct mt_list toremove_conns;
|
struct mt_list toremove_conns;
|
||||||
struct task *cleanup_task;
|
struct task *cleanup_task;
|
||||||
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
|
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
|
||||||
} THREAD_ALIGNED();
|
} THREAD_ALIGNED(64);
|
||||||
|
|
||||||
|
|
||||||
/* Termination events logs:
|
/* Termination events logs:
|
||||||
|
|||||||
@ -39,6 +39,7 @@
|
|||||||
#include <haproxy/task-t.h>
|
#include <haproxy/task-t.h>
|
||||||
|
|
||||||
extern struct pool_head *pool_head_connection;
|
extern struct pool_head *pool_head_connection;
|
||||||
|
extern struct pool_head *pool_head_conn_hash_node;
|
||||||
extern struct pool_head *pool_head_sockaddr;
|
extern struct pool_head *pool_head_sockaddr;
|
||||||
extern struct pool_head *pool_head_pp_tlv_128;
|
extern struct pool_head *pool_head_pp_tlv_128;
|
||||||
extern struct pool_head *pool_head_pp_tlv_256;
|
extern struct pool_head *pool_head_pp_tlv_256;
|
||||||
@ -83,13 +84,14 @@ int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess
|
|||||||
const struct mux_ops *force_mux_ops);
|
const struct mux_ops *force_mux_ops);
|
||||||
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
|
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
|
||||||
|
|
||||||
void conn_delete_from_tree(struct connection *conn, int thr);
|
void conn_delete_from_tree(struct connection *conn);
|
||||||
|
|
||||||
void conn_init(struct connection *conn, void *target);
|
void conn_init(struct connection *conn, void *target);
|
||||||
struct connection *conn_new(void *target);
|
struct connection *conn_new(void *target);
|
||||||
void conn_free(struct connection *conn);
|
void conn_free(struct connection *conn);
|
||||||
void conn_release(struct connection *conn);
|
void conn_release(struct connection *conn);
|
||||||
void conn_set_errno(struct connection *conn, int err);
|
void conn_set_errno(struct connection *conn, int err);
|
||||||
|
struct conn_hash_node *conn_alloc_hash_node(struct connection *conn);
|
||||||
struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
|
struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
|
||||||
void sockaddr_free(struct sockaddr_storage **sap);
|
void sockaddr_free(struct sockaddr_storage **sap);
|
||||||
|
|
||||||
|
|||||||
@ -25,164 +25,108 @@
|
|||||||
|
|
||||||
#include <haproxy/freq_ctr-t.h>
|
#include <haproxy/freq_ctr-t.h>
|
||||||
|
|
||||||
#define COUNTERS_SHARED_F_NONE 0x0000
|
|
||||||
#define COUNTERS_SHARED_F_LOCAL 0x0001 // shared counter struct is actually process-local
|
|
||||||
|
|
||||||
// common to fe_counters_shared and be_counters_shared
|
|
||||||
#define COUNTERS_SHARED \
|
|
||||||
struct { \
|
|
||||||
uint16_t flags; /* COUNTERS_SHARED_F flags */\
|
|
||||||
};
|
|
||||||
|
|
||||||
/* /!\ any change performed here will impact shm-stats-file mapping because the
|
|
||||||
* struct is embedded in shm_stats_file_object struct, so proceed with caution
|
|
||||||
* and change shm stats file version if needed. Also please always keep this
|
|
||||||
* struct 64b-aligned.
|
|
||||||
*/
|
|
||||||
#define COUNTERS_SHARED_TG \
|
|
||||||
struct { \
|
|
||||||
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
|
|
||||||
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
|
|
||||||
long long internal_errors; /* internal processing errors */\
|
|
||||||
long long failed_rewrites; /* failed rewrites (warning) */\
|
|
||||||
long long req_in; /* number of bytes received from the client */\
|
|
||||||
long long req_out; /* number of bytes sent to the server */\
|
|
||||||
long long res_in; /* number of bytes received from the server */\
|
|
||||||
long long res_out; /* number of bytes sent to the client */\
|
|
||||||
long long denied_resp; /* blocked responses because of security concerns */\
|
|
||||||
long long denied_req; /* blocked requests because of security concerns */\
|
|
||||||
long long cum_sess; /* cumulated number of accepted connections */\
|
|
||||||
/* compression counters, index 0 for requests, 1 for responses */\
|
|
||||||
long long comp_in[2]; /* input bytes fed to the compressor */\
|
|
||||||
long long comp_out[2]; /* output bytes emitted by the compressor */\
|
|
||||||
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */\
|
|
||||||
struct freq_ctr sess_per_sec; /* sessions per second on this server (3x32b) */\
|
|
||||||
unsigned int last_state_change; /* last time, when the state was changed (32b) */\
|
|
||||||
/* we're still 64b-aligned here */ \
|
|
||||||
}
|
|
||||||
|
|
||||||
// for convenience (generic pointer)
|
|
||||||
struct counters_shared {
|
|
||||||
COUNTERS_SHARED;
|
|
||||||
struct {
|
|
||||||
COUNTERS_SHARED_TG;
|
|
||||||
} **tg;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* /!\ any change performed here will impact shm-stats-file mapping because the
|
|
||||||
* struct is embedded in shm_stats_file_object struct, so proceed with caution
|
|
||||||
* and change shm stats file version if needed
|
|
||||||
*/
|
|
||||||
struct fe_counters_shared_tg {
|
|
||||||
COUNTERS_SHARED_TG;
|
|
||||||
|
|
||||||
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
|
|
||||||
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
|
|
||||||
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
|
|
||||||
long long cum_conn; /* cumulated number of received connections */
|
|
||||||
struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
|
|
||||||
|
|
||||||
struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
|
|
||||||
|
|
||||||
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
|
|
||||||
union {
|
|
||||||
struct {
|
|
||||||
long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
|
|
||||||
long long cache_hits; /* cache hits */
|
|
||||||
long long cache_lookups;/* cache lookups */
|
|
||||||
long long comp_rsp; /* number of compressed responses */
|
|
||||||
long long rsp[6]; /* http response codes */
|
|
||||||
} http;
|
|
||||||
} p; /* protocol-specific stats */
|
|
||||||
|
|
||||||
long long failed_req; /* failed requests (eg: invalid or timeout) */
|
|
||||||
} ALIGNED(8);
|
|
||||||
|
|
||||||
struct fe_counters_shared {
|
|
||||||
COUNTERS_SHARED;
|
|
||||||
struct fe_counters_shared_tg **tg;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* counters used by listeners and frontends */
|
/* counters used by listeners and frontends */
|
||||||
struct fe_counters {
|
struct fe_counters {
|
||||||
struct fe_counters_shared shared; /* shared counters */
|
|
||||||
unsigned int conn_max; /* max # of active sessions */
|
unsigned int conn_max; /* max # of active sessions */
|
||||||
|
long long cum_conn; /* cumulated number of received connections */
|
||||||
|
long long cum_sess; /* cumulated number of accepted connections */
|
||||||
|
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
|
||||||
|
|
||||||
unsigned int cps_max; /* maximum of new connections received per second */
|
unsigned int cps_max; /* maximum of new connections received per second */
|
||||||
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
|
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
|
||||||
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */
|
|
||||||
struct freq_ctr _conn_per_sec; /* connections per second on this frontend, used to compute cps_max (internal use only) */
|
long long bytes_in; /* number of bytes transferred from the client to the server */
|
||||||
|
long long bytes_out; /* number of bytes transferred from the server to the client */
|
||||||
|
|
||||||
|
/* compression counters, index 0 for requests, 1 for responses */
|
||||||
|
long long comp_in[2]; /* input bytes fed to the compressor */
|
||||||
|
long long comp_out[2]; /* output bytes emitted by the compressor */
|
||||||
|
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
|
||||||
|
|
||||||
|
long long denied_req; /* blocked requests because of security concerns */
|
||||||
|
long long denied_resp; /* blocked responses because of security concerns */
|
||||||
|
long long failed_req; /* failed requests (eg: invalid or timeout) */
|
||||||
|
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
|
||||||
|
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
|
||||||
|
long long failed_rewrites; /* failed rewrites (warning) */
|
||||||
|
long long internal_errors; /* internal processing errors */
|
||||||
|
|
||||||
|
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
|
||||||
|
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
|
||||||
|
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
|
||||||
|
|
||||||
union {
|
union {
|
||||||
struct {
|
struct {
|
||||||
unsigned int rps_max; /* maximum of new HTTP requests second observed */
|
long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
|
||||||
struct freq_ctr _req_per_sec; /* HTTP requests per second on the frontend, only used to compute rps_max */
|
|
||||||
} http;
|
|
||||||
} p; /* protocol-specific stats */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* /!\ any change performed here will impact shm-stats-file mapping because the
|
|
||||||
* struct is embedded in shm_stats_file_object struct, so proceed with caution
|
|
||||||
* and change shm stats file version if needed. Pay attention to keeping the
|
|
||||||
* struct 64b-aligned.
|
|
||||||
*/
|
|
||||||
struct be_counters_shared_tg {
|
|
||||||
COUNTERS_SHARED_TG;
|
|
||||||
|
|
||||||
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
|
|
||||||
|
|
||||||
long long connect; /* number of connection establishment attempts */
|
|
||||||
long long reuse; /* number of connection reuses */
|
|
||||||
|
|
||||||
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
|
|
||||||
long long down_trans; /* up->down transitions */
|
|
||||||
|
|
||||||
union {
|
|
||||||
struct {
|
|
||||||
long long cum_req; /* cumulated number of processed HTTP requests */
|
|
||||||
|
|
||||||
long long cache_hits; /* cache hits */
|
|
||||||
long long cache_lookups;/* cache lookups */
|
|
||||||
long long comp_rsp; /* number of compressed responses */
|
long long comp_rsp; /* number of compressed responses */
|
||||||
|
unsigned int rps_max; /* maximum of new HTTP requests second observed */
|
||||||
long long rsp[6]; /* http response codes */
|
long long rsp[6]; /* http response codes */
|
||||||
|
long long cache_lookups;/* cache lookups */
|
||||||
|
long long cache_hits; /* cache hits */
|
||||||
} http;
|
} http;
|
||||||
} p; /* protocol-specific stats */
|
} p; /* protocol-specific stats */
|
||||||
|
|
||||||
long long redispatches; /* retried and redispatched connections (BE only) */
|
struct freq_ctr sess_per_sec; /* sessions per second on this server */
|
||||||
long long retries; /* retried and redispatched connections (BE only) */
|
struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
|
||||||
long long failed_resp; /* failed responses (BE only) */
|
struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
|
||||||
long long failed_conns; /* failed connect() attempts (BE only) */
|
|
||||||
unsigned int last_sess; /* last session time */
|
|
||||||
/* 32-bit hole here */
|
|
||||||
} ALIGNED(8);
|
|
||||||
|
|
||||||
struct be_counters_shared {
|
unsigned long last_change; /* last time, when the state was changed */
|
||||||
COUNTERS_SHARED;
|
|
||||||
struct be_counters_shared_tg **tg;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* counters used by servers and backends */
|
/* counters used by servers and backends */
|
||||||
struct be_counters {
|
struct be_counters {
|
||||||
struct be_counters_shared shared; /* shared counters */
|
|
||||||
unsigned int conn_max; /* max # of active sessions */
|
unsigned int conn_max; /* max # of active sessions */
|
||||||
|
long long cum_sess; /* cumulated number of accepted connections */
|
||||||
|
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
|
||||||
|
|
||||||
unsigned int cps_max; /* maximum of new connections received per second */
|
unsigned int cps_max; /* maximum of new connections received per second */
|
||||||
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
|
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
|
||||||
unsigned int nbpend_max; /* max number of pending connections with no server assigned yet */
|
unsigned int nbpend_max; /* max number of pending connections with no server assigned yet */
|
||||||
unsigned int cur_sess_max; /* max number of currently active sessions */
|
unsigned int cur_sess_max; /* max number of currently active sessions */
|
||||||
|
|
||||||
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */
|
long long bytes_in; /* number of bytes transferred from the client to the server */
|
||||||
|
long long bytes_out; /* number of bytes transferred from the server to the client */
|
||||||
|
|
||||||
|
/* compression counters, index 0 for requests, 1 for responses */
|
||||||
|
long long comp_in[2]; /* input bytes fed to the compressor */
|
||||||
|
long long comp_out[2]; /* output bytes emitted by the compressor */
|
||||||
|
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
|
||||||
|
|
||||||
|
long long denied_req; /* blocked requests because of security concerns */
|
||||||
|
long long denied_resp; /* blocked responses because of security concerns */
|
||||||
|
|
||||||
|
long long connect; /* number of connection establishment attempts */
|
||||||
|
long long reuse; /* number of connection reuses */
|
||||||
|
long long failed_conns; /* failed connect() attempts (BE only) */
|
||||||
|
long long failed_resp; /* failed responses (BE only) */
|
||||||
|
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
|
||||||
|
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
|
||||||
|
long long retries; /* retried and redispatched connections (BE only) */
|
||||||
|
long long redispatches; /* retried and redispatched connections (BE only) */
|
||||||
|
long long failed_rewrites; /* failed rewrites (warning) */
|
||||||
|
long long internal_errors; /* internal processing errors */
|
||||||
|
|
||||||
|
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
|
||||||
|
long long down_trans; /* up->down transitions */
|
||||||
|
|
||||||
unsigned int q_time, c_time, d_time, t_time; /* sums of conn_time, queue_time, data_time, total_time */
|
unsigned int q_time, c_time, d_time, t_time; /* sums of conn_time, queue_time, data_time, total_time */
|
||||||
unsigned int qtime_max, ctime_max, dtime_max, ttime_max; /* maximum of conn_time, queue_time, data_time, total_time observed */
|
unsigned int qtime_max, ctime_max, dtime_max, ttime_max; /* maximum of conn_time, queue_time, data_time, total_time observed */
|
||||||
|
|
||||||
union {
|
union {
|
||||||
struct {
|
struct {
|
||||||
|
long long cum_req; /* cumulated number of processed HTTP requests */
|
||||||
|
long long comp_rsp; /* number of compressed responses */
|
||||||
unsigned int rps_max; /* maximum of new HTTP requests second observed */
|
unsigned int rps_max; /* maximum of new HTTP requests second observed */
|
||||||
|
long long rsp[6]; /* http response codes */
|
||||||
|
long long cache_lookups;/* cache lookups */
|
||||||
|
long long cache_hits; /* cache hits */
|
||||||
} http;
|
} http;
|
||||||
} p; /* protocol-specific stats */
|
} p; /* protocol-specific stats */
|
||||||
|
|
||||||
|
struct freq_ctr sess_per_sec; /* sessions per second on this server */
|
||||||
|
|
||||||
|
unsigned long last_sess; /* last session time */
|
||||||
|
unsigned long last_change; /* last time, when the state was changed */
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _HAPROXY_COUNTERS_T_H */
|
#endif /* _HAPROXY_COUNTERS_T_H */
|
||||||
|
|||||||
@ -1,104 +0,0 @@
|
|||||||
/*
|
|
||||||
* include/haproxy/counters.h
|
|
||||||
* objects counters management
|
|
||||||
*
|
|
||||||
* Copyright 2025 HAProxy Technologies
|
|
||||||
*
|
|
||||||
* This library is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU Lesser General Public
|
|
||||||
* License as published by the Free Software Foundation, version 2.1
|
|
||||||
* exclusively.
|
|
||||||
*
|
|
||||||
* This library is distributed in the hope that it will be useful,
|
|
||||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
||||||
* Lesser General Public License for more details.
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU Lesser General Public
|
|
||||||
* License along with this library; if not, write to the Free Software
|
|
||||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _HAPROXY_COUNTERS_H
|
|
||||||
# define _HAPROXY_COUNTERS_H
|
|
||||||
|
|
||||||
#include <stddef.h>
|
|
||||||
|
|
||||||
#include <haproxy/counters-t.h>
|
|
||||||
#include <haproxy/guid-t.h>
|
|
||||||
|
|
||||||
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
|
||||||
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
|
||||||
|
|
||||||
void counters_fe_shared_drop(struct fe_counters_shared *counters);
|
|
||||||
void counters_be_shared_drop(struct be_counters_shared *counters);
|
|
||||||
|
|
||||||
/* time oriented helper: get last time (relative to current time) on a given
|
|
||||||
* <scounter> array, for <elem> member (one member per thread group) which is
|
|
||||||
* assumed to be unsigned long type.
|
|
||||||
*
|
|
||||||
* wrapping is handled by taking the lowest diff between now and last counter.
|
|
||||||
* But since wrapping is expected once every ~136 years (starting 01/01/1970),
|
|
||||||
* perhaps it's not worth the extra CPU cost.. let's see.
|
|
||||||
*/
|
|
||||||
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
|
|
||||||
({ \
|
|
||||||
unsigned long last = 0; \
|
|
||||||
unsigned long now_seconds = ns_to_sec(now_ns); \
|
|
||||||
int it; \
|
|
||||||
\
|
|
||||||
if (scounters) \
|
|
||||||
last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
|
|
||||||
for (it = 1; (it < global.nbtgroups && scounters); it++) { \
|
|
||||||
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
|
|
||||||
if ((now_seconds - cur) < (now_seconds - last)) \
|
|
||||||
last = cur; \
|
|
||||||
} \
|
|
||||||
last; \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define COUNTERS_SHARED_LAST(scounters, elem) \
|
|
||||||
({ \
|
|
||||||
int offset = offsetof(typeof(**scounters), elem); \
|
|
||||||
unsigned long last = COUNTERS_SHARED_LAST_OFFSET(scounters, typeof(scounters[0]->elem), offset); \
|
|
||||||
\
|
|
||||||
last; \
|
|
||||||
})
|
|
||||||
|
|
||||||
|
|
||||||
/* generic unsigned integer addition for all <elem> members from
|
|
||||||
* <scounters> array (one member per thread group)
|
|
||||||
* <rfunc> is function taking pointer as parameter to read from the memory
|
|
||||||
* location pointed to scounters[it].elem
|
|
||||||
*/
|
|
||||||
#define COUNTERS_SHARED_TOTAL_OFFSET(scounters, type, offset, rfunc) \
|
|
||||||
({ \
|
|
||||||
uint64_t __ret = 0; \
|
|
||||||
int it; \
|
|
||||||
\
|
|
||||||
for (it = 0; (it < global.nbtgroups && scounters); it++) \
|
|
||||||
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
|
|
||||||
__ret; \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define COUNTERS_SHARED_TOTAL(scounters, elem, rfunc) \
|
|
||||||
({ \
|
|
||||||
int offset = offsetof(typeof(**scounters), elem); \
|
|
||||||
uint64_t __ret = COUNTERS_SHARED_TOTAL_OFFSET(scounters, typeof(scounters[0]->elem), offset, rfunc);\
|
|
||||||
\
|
|
||||||
__ret; \
|
|
||||||
})
|
|
||||||
/* same as COUNTERS_SHARED_TOTAL but with <rfunc> taking 2 extras arguments:
|
|
||||||
* <arg1> and <arg2>
|
|
||||||
*/
|
|
||||||
#define COUNTERS_SHARED_TOTAL_ARG2(scounters, elem, rfunc, arg1, arg2) \
|
|
||||||
({ \
|
|
||||||
uint64_t __ret = 0; \
|
|
||||||
int it; \
|
|
||||||
\
|
|
||||||
for (it = 0; (it < global.nbtgroups && scounters); it++) \
|
|
||||||
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
|
|
||||||
__ret; \
|
|
||||||
})
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_COUNTERS_H */
|
|
||||||
@ -2,7 +2,6 @@
|
|||||||
#define _HAPROXY_CPU_TOPO_H
|
#define _HAPROXY_CPU_TOPO_H
|
||||||
|
|
||||||
#include <haproxy/api.h>
|
#include <haproxy/api.h>
|
||||||
#include <haproxy/chunk.h>
|
|
||||||
#include <haproxy/cpuset-t.h>
|
#include <haproxy/cpuset-t.h>
|
||||||
#include <haproxy/cpu_topo-t.h>
|
#include <haproxy/cpu_topo-t.h>
|
||||||
|
|
||||||
@ -56,12 +55,7 @@ int cpu_map_configured(void);
|
|||||||
/* Dump the CPU topology <topo> for up to cpu_topo_maxcpus CPUs for
|
/* Dump the CPU topology <topo> for up to cpu_topo_maxcpus CPUs for
|
||||||
* debugging purposes. Offline CPUs are skipped.
|
* debugging purposes. Offline CPUs are skipped.
|
||||||
*/
|
*/
|
||||||
void cpu_topo_debug(const struct ha_cpu_topo *topo);
|
void cpu_dump_topology(const struct ha_cpu_topo *topo);
|
||||||
|
|
||||||
/* Dump the summary of CPU topology <topo>, i.e. clusters info and thread-cpu
|
|
||||||
* bindings.
|
|
||||||
*/
|
|
||||||
void cpu_topo_dump_summary(const struct ha_cpu_topo *topo, struct buffer *trash);
|
|
||||||
|
|
||||||
/* re-order a CPU topology array by locality to help form groups. */
|
/* re-order a CPU topology array by locality to help form groups. */
|
||||||
void cpu_reorder_by_locality(struct ha_cpu_topo *topo, int entries);
|
void cpu_reorder_by_locality(struct ha_cpu_topo *topo, int entries);
|
||||||
|
|||||||
@ -44,7 +44,7 @@
|
|||||||
* doesn't engage us too far.
|
* doesn't engage us too far.
|
||||||
*/
|
*/
|
||||||
#ifndef MAX_TGROUPS
|
#ifndef MAX_TGROUPS
|
||||||
#define MAX_TGROUPS 32
|
#define MAX_TGROUPS 16
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define MAX_THREADS_PER_GROUP __WORDSIZE
|
#define MAX_THREADS_PER_GROUP __WORDSIZE
|
||||||
@ -53,7 +53,7 @@
|
|||||||
* long bits if more tgroups are enabled.
|
* long bits if more tgroups are enabled.
|
||||||
*/
|
*/
|
||||||
#ifndef MAX_THREADS
|
#ifndef MAX_THREADS
|
||||||
#define MAX_THREADS ((((MAX_TGROUPS) > 1) ? 16 : 1) * (MAX_THREADS_PER_GROUP))
|
#define MAX_THREADS ((((MAX_TGROUPS) > 1) ? 4 : 1) * (MAX_THREADS_PER_GROUP))
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif // USE_THREAD
|
#endif // USE_THREAD
|
||||||
@ -115,10 +115,6 @@
|
|||||||
// via standard input.
|
// via standard input.
|
||||||
#define MAX_CFG_SIZE 10485760
|
#define MAX_CFG_SIZE 10485760
|
||||||
|
|
||||||
// may be handy for some system config files, where we just need to find
|
|
||||||
// some specific values (read with fgets)
|
|
||||||
#define MAX_LINES_TO_READ 32
|
|
||||||
|
|
||||||
// max # args on a configuration line
|
// max # args on a configuration line
|
||||||
#define MAX_LINE_ARGS 64
|
#define MAX_LINE_ARGS 64
|
||||||
|
|
||||||
@ -353,11 +349,6 @@
|
|||||||
#define SRV_CHK_INTER_THRES 1000
|
#define SRV_CHK_INTER_THRES 1000
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* INET6 connectivity caching interval (in ms) */
|
|
||||||
#ifndef INET6_CONNECTIVITY_CACHE_TIME
|
|
||||||
#define INET6_CONNECTIVITY_CACHE_TIME 30000
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Specifies the string used to report the version and release date on the
|
/* Specifies the string used to report the version and release date on the
|
||||||
* statistics page. May be defined to the empty string ("") to permanently
|
* statistics page. May be defined to the empty string ("") to permanently
|
||||||
* disable the feature.
|
* disable the feature.
|
||||||
@ -366,13 +357,6 @@
|
|||||||
#define STATS_VERSION_STRING " version " HAPROXY_VERSION ", released " HAPROXY_DATE
|
#define STATS_VERSION_STRING " version " HAPROXY_VERSION ", released " HAPROXY_DATE
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* specifies the default max number of object per thread group that the shm stats file
|
|
||||||
* will be able to handle
|
|
||||||
*/
|
|
||||||
#ifndef SHM_STATS_FILE_MAX_OBJECTS
|
|
||||||
#define SHM_STATS_FILE_MAX_OBJECTS 2000
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* This is the default statistics URI */
|
/* This is the default statistics URI */
|
||||||
#ifdef CONFIG_STATS_DEFAULT_URI
|
#ifdef CONFIG_STATS_DEFAULT_URI
|
||||||
#define STATS_DEFAULT_URI CONFIG_STATS_DEFAULT_URI
|
#define STATS_DEFAULT_URI CONFIG_STATS_DEFAULT_URI
|
||||||
@ -670,8 +654,4 @@
|
|||||||
#define QUIC_MAX_TX_MEM 0
|
#define QUIC_MAX_TX_MEM 0
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef STKTABLE_MAX_UPDATES_AT_ONCE
|
|
||||||
#define STKTABLE_MAX_UPDATES_AT_ONCE 100
|
|
||||||
#endif /* STKTABLE_MAX_UPDATES_AT_ONCE */
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_DEFAULTS_H */
|
#endif /* _HAPROXY_DEFAULTS_H */
|
||||||
|
|||||||
@ -1,13 +0,0 @@
|
|||||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
|
||||||
#ifndef _HAPROXY_ECH_H
|
|
||||||
# define _HAPROXY_ECH_H
|
|
||||||
#ifdef USE_ECH
|
|
||||||
|
|
||||||
#include <openssl/ech.h>
|
|
||||||
|
|
||||||
int load_echkeys(SSL_CTX *ctx, char *dirname, int *loaded);
|
|
||||||
int conn_get_ech_status(struct connection *conn, struct buffer *buf);
|
|
||||||
int conn_get_ech_outer_sni(struct connection *conn, struct buffer *buf);
|
|
||||||
|
|
||||||
# endif /* USE_ECH */
|
|
||||||
#endif /* _HAPROXY_ECH_H */
|
|
||||||
@ -202,7 +202,7 @@ struct fdtab {
|
|||||||
#ifdef DEBUG_FD
|
#ifdef DEBUG_FD
|
||||||
unsigned int event_count; /* number of events reported */
|
unsigned int event_count; /* number of events reported */
|
||||||
#endif
|
#endif
|
||||||
} THREAD_ALIGNED();
|
} THREAD_ALIGNED(64);
|
||||||
|
|
||||||
/* polled mask, one bit per thread and per direction for each FD */
|
/* polled mask, one bit per thread and per direction for each FD */
|
||||||
struct polled_mask {
|
struct polled_mask {
|
||||||
|
|||||||
@ -499,7 +499,6 @@ static inline long fd_clr_running(int fd)
|
|||||||
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid, unsigned long thread_mask)
|
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid, unsigned long thread_mask)
|
||||||
{
|
{
|
||||||
extern void sock_conn_iocb(int);
|
extern void sock_conn_iocb(int);
|
||||||
struct tgroup_info *tginfo = &ha_tgroup_info[tgid - 1];
|
|
||||||
int newstate;
|
int newstate;
|
||||||
|
|
||||||
/* conn_fd_handler should support edge-triggered FDs */
|
/* conn_fd_handler should support edge-triggered FDs */
|
||||||
@ -529,7 +528,7 @@ static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid
|
|||||||
BUG_ON(fdtab[fd].state != 0);
|
BUG_ON(fdtab[fd].state != 0);
|
||||||
BUG_ON(tgid < 1 || tgid > MAX_TGROUPS);
|
BUG_ON(tgid < 1 || tgid > MAX_TGROUPS);
|
||||||
|
|
||||||
thread_mask &= tginfo->threads_enabled;
|
thread_mask &= tg->threads_enabled;
|
||||||
BUG_ON(thread_mask == 0);
|
BUG_ON(thread_mask == 0);
|
||||||
|
|
||||||
fd_claim_tgid(fd, tgid);
|
fd_claim_tgid(fd, tgid);
|
||||||
|
|||||||
@ -31,7 +31,7 @@
|
|||||||
ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
|
ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
|
||||||
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
|
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
|
||||||
ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
|
ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
|
||||||
uint freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
|
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
|
||||||
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
|
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
|
||||||
|
|
||||||
/* Only usable during single threaded startup phase. */
|
/* Only usable during single threaded startup phase. */
|
||||||
|
|||||||
@ -80,12 +80,11 @@
|
|||||||
#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
|
#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
|
||||||
#define GTUNE_QUICK_EXIT (1<<23)
|
#define GTUNE_QUICK_EXIT (1<<23)
|
||||||
/* (1<<24) unused */
|
/* (1<<24) unused */
|
||||||
/* (1<<25) unused */
|
#define GTUNE_NO_QUIC (1<<25)
|
||||||
#define GTUNE_USE_FAST_FWD (1<<26)
|
#define GTUNE_USE_FAST_FWD (1<<26)
|
||||||
#define GTUNE_LISTENER_MQ_FAIR (1<<27)
|
#define GTUNE_LISTENER_MQ_FAIR (1<<27)
|
||||||
#define GTUNE_LISTENER_MQ_OPT (1<<28)
|
#define GTUNE_LISTENER_MQ_OPT (1<<28)
|
||||||
#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
|
#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
|
||||||
#define GTUNE_NO_KTLS (1<<29)
|
|
||||||
|
|
||||||
/* subsystem-specific debugging options for tune.debug */
|
/* subsystem-specific debugging options for tune.debug */
|
||||||
#define GDBG_CPU_AFFINITY (1U<< 0)
|
#define GDBG_CPU_AFFINITY (1U<< 0)
|
||||||
@ -167,7 +166,6 @@ struct global {
|
|||||||
char *server_state_base; /* path to a directory where server state files can be found */
|
char *server_state_base; /* path to a directory where server state files can be found */
|
||||||
char *server_state_file; /* path to the file where server states are loaded from */
|
char *server_state_file; /* path to the file where server states are loaded from */
|
||||||
char *stats_file; /* path to stats-file */
|
char *stats_file; /* path to stats-file */
|
||||||
char *shm_stats_file; /* path to shm-stats-file */
|
|
||||||
unsigned char cluster_secret[16]; /* 128 bits of an SHA1 digest of a secret defined as ASCII string */
|
unsigned char cluster_secret[16]; /* 128 bits of an SHA1 digest of a secret defined as ASCII string */
|
||||||
struct {
|
struct {
|
||||||
int maxpollevents; /* max number of poll events at once */
|
int maxpollevents; /* max number of poll events at once */
|
||||||
@ -199,7 +197,6 @@ struct global {
|
|||||||
int pattern_cache; /* max number of entries in the pattern cache. */
|
int pattern_cache; /* max number of entries in the pattern cache. */
|
||||||
int sslcachesize; /* SSL cache size in session, defaults to 20000 */
|
int sslcachesize; /* SSL cache size in session, defaults to 20000 */
|
||||||
int comp_maxlevel; /* max HTTP compression level */
|
int comp_maxlevel; /* max HTTP compression level */
|
||||||
uint glitch_kill_maxidle; /* have glitches kill only below this level of idle */
|
|
||||||
int pool_low_ratio; /* max ratio of FDs used before we stop using new idle connections */
|
int pool_low_ratio; /* max ratio of FDs used before we stop using new idle connections */
|
||||||
int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new connections */
|
int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new connections */
|
||||||
int pool_low_count; /* max number of opened fd before we stop using new idle connections */
|
int pool_low_count; /* max number of opened fd before we stop using new idle connections */
|
||||||
@ -214,6 +211,20 @@ struct global {
|
|||||||
uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
|
uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
|
||||||
uint ring_queues; /* if >0, #ring queues, otherwise equals #thread groups */
|
uint ring_queues; /* if >0, #ring queues, otherwise equals #thread groups */
|
||||||
enum threadgroup_takeover tg_takeover; /* Policy for threadgroup takeover */
|
enum threadgroup_takeover tg_takeover; /* Policy for threadgroup takeover */
|
||||||
|
#ifdef USE_QUIC
|
||||||
|
unsigned int quic_backend_max_idle_timeout;
|
||||||
|
unsigned int quic_frontend_max_idle_timeout;
|
||||||
|
unsigned int quic_frontend_glitches_threshold;
|
||||||
|
unsigned int quic_frontend_max_data;
|
||||||
|
unsigned int quic_frontend_max_streams_bidi;
|
||||||
|
uint64_t quic_frontend_max_tx_mem;
|
||||||
|
size_t quic_frontend_max_window_size;
|
||||||
|
unsigned int quic_frontend_stream_data_ratio;
|
||||||
|
unsigned int quic_retry_threshold;
|
||||||
|
unsigned int quic_reorder_ratio;
|
||||||
|
unsigned int quic_max_frame_loss;
|
||||||
|
unsigned int quic_cubic_loss_tol;
|
||||||
|
#endif /* USE_QUIC */
|
||||||
} tune;
|
} tune;
|
||||||
struct {
|
struct {
|
||||||
char *prefix; /* path prefix of unix bind socket */
|
char *prefix; /* path prefix of unix bind socket */
|
||||||
@ -233,7 +244,6 @@ struct global {
|
|||||||
* than 255 arguments
|
* than 255 arguments
|
||||||
*/
|
*/
|
||||||
/* 2-bytes hole */
|
/* 2-bytes hole */
|
||||||
int est_fd_usage; /* rough estimate of reserved FDs (listeners, pollers etc) */
|
|
||||||
int cfg_curr_line; /* line number currently being parsed */
|
int cfg_curr_line; /* line number currently being parsed */
|
||||||
const char *cfg_curr_file; /* config file currently being parsed or NULL */
|
const char *cfg_curr_file; /* config file currently being parsed or NULL */
|
||||||
char *cfg_curr_section; /* config section name currently being parsed or NULL */
|
char *cfg_curr_section; /* config section name currently being parsed or NULL */
|
||||||
@ -261,7 +271,6 @@ struct global {
|
|||||||
unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
|
unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
|
||||||
int last_checks;
|
int last_checks;
|
||||||
uint32_t anon_key;
|
uint32_t anon_key;
|
||||||
int maxthrpertgroup; /* Maximum number of threads per thread group */
|
|
||||||
|
|
||||||
/* leave this at the end to make sure we don't share this cache line by accident */
|
/* leave this at the end to make sure we don't share this cache line by accident */
|
||||||
ALWAYS_ALIGN(64);
|
ALWAYS_ALIGN(64);
|
||||||
|
|||||||
@ -53,7 +53,6 @@ extern char *progname;
|
|||||||
extern char **old_argv;
|
extern char **old_argv;
|
||||||
extern const char *old_unixsocket;
|
extern const char *old_unixsocket;
|
||||||
extern int daemon_fd[2];
|
extern int daemon_fd[2];
|
||||||
extern int devnullfd;
|
|
||||||
|
|
||||||
struct proxy;
|
struct proxy;
|
||||||
struct server;
|
struct server;
|
||||||
|
|||||||
@ -1,15 +1,14 @@
|
|||||||
#ifndef _HAPROXY_GUID_T_H
|
#ifndef _HAPROXY_GUID_T_H
|
||||||
#define _HAPROXY_GUID_T_H
|
#define _HAPROXY_GUID_T_H
|
||||||
|
|
||||||
#include <import/cebtree.h>
|
#include <import/ebtree-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
|
|
||||||
/* Maximum GUID size excluding final '\0' */
|
/* Maximum GUID size excluding final '\0' */
|
||||||
#define GUID_MAX_LEN 127
|
#define GUID_MAX_LEN 127
|
||||||
|
|
||||||
struct guid_node {
|
struct guid_node {
|
||||||
struct ceb_node node; /* attach point into GUID global tree */
|
struct ebpt_node node; /* attach point into GUID global tree */
|
||||||
char *key; /* the key itself */
|
|
||||||
enum obj_type *obj_type; /* pointer to GUID obj owner */
|
enum obj_type *obj_type; /* pointer to GUID obj owner */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -12,16 +12,7 @@ int guid_insert(enum obj_type *obj_type, const char *uid, char **errmsg);
|
|||||||
void guid_remove(struct guid_node *guid);
|
void guid_remove(struct guid_node *guid);
|
||||||
struct guid_node *guid_lookup(const char *uid);
|
struct guid_node *guid_lookup(const char *uid);
|
||||||
|
|
||||||
/* Returns the actual text key associated to <guid> node or NULL if not
|
|
||||||
* set
|
|
||||||
*/
|
|
||||||
static inline const char *guid_get(const struct guid_node *guid)
|
|
||||||
{
|
|
||||||
return guid->key;
|
|
||||||
}
|
|
||||||
|
|
||||||
int guid_is_valid_fmt(const char *uid, char **errmsg);
|
int guid_is_valid_fmt(const char *uid, char **errmsg);
|
||||||
char *guid_name(const struct guid_node *guid);
|
char *guid_name(const struct guid_node *guid);
|
||||||
int guid_count(void);
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_GUID_H */
|
#endif /* _HAPROXY_GUID_H */
|
||||||
|
|||||||
@ -65,7 +65,6 @@ int h1_format_htx_reqline(const struct htx_sl *sl, struct buffer *chk);
|
|||||||
int h1_format_htx_stline(const struct htx_sl *sl, struct buffer *chk);
|
int h1_format_htx_stline(const struct htx_sl *sl, struct buffer *chk);
|
||||||
int h1_format_htx_hdr(const struct ist n, const struct ist v, struct buffer *chk);
|
int h1_format_htx_hdr(const struct ist n, const struct ist v, struct buffer *chk);
|
||||||
int h1_format_htx_data(const struct ist data, struct buffer *chk, int chunked);
|
int h1_format_htx_data(const struct ist data, struct buffer *chk, int chunked);
|
||||||
int h1_format_htx_msg(const struct htx *htx, struct buffer *outbuf);
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_H1_HTX_H */
|
#endif /* _HAPROXY_H1_HTX_H */
|
||||||
|
|
||||||
|
|||||||
@ -72,8 +72,8 @@ struct stream;
|
|||||||
#define HLUA_NOYIELD 0x00000020
|
#define HLUA_NOYIELD 0x00000020
|
||||||
#define HLUA_BUSY 0x00000040
|
#define HLUA_BUSY 0x00000040
|
||||||
|
|
||||||
#define HLUA_F_AS_STRING 0x01
|
#define HLUA_F_AS_STRING 0x01
|
||||||
#define HLUA_F_MAY_USE_CHANNELS_DATA 0x02
|
#define HLUA_F_MAY_USE_HTTP 0x02
|
||||||
|
|
||||||
/* HLUA TXN flags */
|
/* HLUA TXN flags */
|
||||||
#define HLUA_TXN_NOTERM 0x00000001
|
#define HLUA_TXN_NOTERM 0x00000001
|
||||||
@ -255,7 +255,6 @@ struct hlua_patref_iterator_context {
|
|||||||
struct hlua_patref *ref;
|
struct hlua_patref *ref;
|
||||||
struct bref bref; /* back-reference from the pat_ref_elt being accessed
|
struct bref bref; /* back-reference from the pat_ref_elt being accessed
|
||||||
* during listing */
|
* during listing */
|
||||||
struct pat_ref_gen *gen; /* the generation we are iterating over */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#else /* USE_LUA */
|
#else /* USE_LUA */
|
||||||
|
|||||||
@ -232,52 +232,6 @@ static inline int http_path_has_forbidden_char(const struct ist ist, const char
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Checks whether the :authority pseudo header contains dangerous chars that
|
|
||||||
* might affect its reassembly. We want to catch anything below 0x21, above
|
|
||||||
* 0x7e, as well as '@', '[', ']', '/','?', '#', '\', CR, LF, NUL. Then we
|
|
||||||
* fall back to the slow path and decide. Brackets are used for IP-literal and
|
|
||||||
* deserve special case, that is better handled in the slow path. The function
|
|
||||||
* returns 0 if no forbidden char is presnet, non-zero otherwise.
|
|
||||||
*/
|
|
||||||
static inline int http_authority_has_forbidden_char(const struct ist ist)
|
|
||||||
{
|
|
||||||
size_t ofs, len = istlen(ist);
|
|
||||||
const char *p = istptr(ist);
|
|
||||||
int brackets = 0;
|
|
||||||
uchar c;
|
|
||||||
|
|
||||||
/* Many attempts with various methods have shown that moderately recent
|
|
||||||
* compilers (gcc >= 9, clang >= 13) will arrange the code below as an
|
|
||||||
* evaluation tree that remains efficient at -O2 and above (~1.2ns per
|
|
||||||
* char). The immediate next efficient one is the bitmap from 64-bit
|
|
||||||
* registers but it's extremely sensitive to code arrangements and
|
|
||||||
* optimization.
|
|
||||||
*/
|
|
||||||
for (ofs = 0; ofs < len; ofs++) {
|
|
||||||
c = p[ofs];
|
|
||||||
|
|
||||||
if (unlikely(c < 0x21 || c > 0x7e ||
|
|
||||||
c == '#' || c == '/' || c == '?' || c == '@' ||
|
|
||||||
c == '[' || c == '\\' || c == ']')) {
|
|
||||||
/* all of them must be rejected, except '[' which may
|
|
||||||
* only appear at the beginning, and ']' which may
|
|
||||||
* only appear at the end or before a colon.
|
|
||||||
*/
|
|
||||||
if ((c == '[' && ofs == 0) ||
|
|
||||||
(c == ']' && (ofs == len - 1 || p[ofs + 1] == ':'))) {
|
|
||||||
/* that's an IP-literal (see RFC3986#3.2), it's
|
|
||||||
* OK for now.
|
|
||||||
*/
|
|
||||||
brackets ^= 1;
|
|
||||||
} else {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/* there must be no opening bracket left nor lone closing one */
|
|
||||||
return brackets;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Checks status code array <array> for the presence of status code <status>.
|
/* Checks status code array <array> for the presence of status code <status>.
|
||||||
* Returns non-zero if the code is present, zero otherwise. Any status code is
|
* Returns non-zero if the code is present, zero otherwise. Any status code is
|
||||||
* permitted.
|
* permitted.
|
||||||
|
|||||||
@ -184,7 +184,6 @@ enum {
|
|||||||
PERSIST_TYPE_NONE = 0, /* no persistence */
|
PERSIST_TYPE_NONE = 0, /* no persistence */
|
||||||
PERSIST_TYPE_FORCE, /* force-persist */
|
PERSIST_TYPE_FORCE, /* force-persist */
|
||||||
PERSIST_TYPE_IGNORE, /* ignore-persist */
|
PERSIST_TYPE_IGNORE, /* ignore-persist */
|
||||||
PERSIST_TYPE_BE_SWITCH, /* force-be-switch */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* final results for http-request rules */
|
/* final results for http-request rules */
|
||||||
|
|||||||
@ -32,7 +32,6 @@ struct httpclient {
|
|||||||
int timeout_server; /* server timeout in ms */
|
int timeout_server; /* server timeout in ms */
|
||||||
void *caller; /* ptr of the caller */
|
void *caller; /* ptr of the caller */
|
||||||
unsigned int flags; /* other flags */
|
unsigned int flags; /* other flags */
|
||||||
unsigned int options; /* options */
|
|
||||||
struct proxy *px; /* proxy for special cases */
|
struct proxy *px; /* proxy for special cases */
|
||||||
struct server *srv_raw; /* server for clear connections */
|
struct server *srv_raw; /* server for clear connections */
|
||||||
#ifdef USE_OPENSSL
|
#ifdef USE_OPENSSL
|
||||||
@ -43,16 +42,11 @@ struct httpclient {
|
|||||||
/* Action (FA) to do */
|
/* Action (FA) to do */
|
||||||
#define HTTPCLIENT_FA_STOP 0x00000001 /* stops the httpclient at the next IO handler call */
|
#define HTTPCLIENT_FA_STOP 0x00000001 /* stops the httpclient at the next IO handler call */
|
||||||
#define HTTPCLIENT_FA_AUTOKILL 0x00000002 /* sets the applet to destroy the httpclient struct itself */
|
#define HTTPCLIENT_FA_AUTOKILL 0x00000002 /* sets the applet to destroy the httpclient struct itself */
|
||||||
#define HTTPCLIENT_FA_DRAIN_REQ 0x00000004 /* drains the request */
|
|
||||||
|
|
||||||
/* status (FS) */
|
/* status (FS) */
|
||||||
#define HTTPCLIENT_FS_STARTED 0x00010000 /* the httpclient was started */
|
#define HTTPCLIENT_FS_STARTED 0x00010000 /* the httpclient was started */
|
||||||
#define HTTPCLIENT_FS_ENDED 0x00020000 /* the httpclient is stopped */
|
#define HTTPCLIENT_FS_ENDED 0x00020000 /* the httpclient is stopped */
|
||||||
|
|
||||||
/* options */
|
|
||||||
#define HTTPCLIENT_O_HTTPPROXY 0x00000001 /* the request must be use an absolute URI */
|
|
||||||
#define HTTPCLIENT_O_RES_HTX 0x00000002 /* response is stored in HTX */
|
|
||||||
|
|
||||||
/* States of the HTTP Client Appctx */
|
/* States of the HTTP Client Appctx */
|
||||||
enum {
|
enum {
|
||||||
HTTPCLIENT_S_REQ = 0,
|
HTTPCLIENT_S_REQ = 0,
|
||||||
@ -65,4 +59,12 @@ enum {
|
|||||||
|
|
||||||
#define HTTPCLIENT_USERAGENT "HAProxy"
|
#define HTTPCLIENT_USERAGENT "HAProxy"
|
||||||
|
|
||||||
|
/* What kind of data we need to read */
|
||||||
|
#define HC_F_RES_STLINE 0x01
|
||||||
|
#define HC_F_RES_HDR 0x02
|
||||||
|
#define HC_F_RES_BODY 0x04
|
||||||
|
#define HC_F_RES_END 0x08
|
||||||
|
#define HC_F_HTTPPROXY 0x10
|
||||||
|
|
||||||
|
|
||||||
#endif /* ! _HAPROXY_HTTCLIENT__T_H */
|
#endif /* ! _HAPROXY_HTTCLIENT__T_H */
|
||||||
|
|||||||
@ -177,7 +177,7 @@ static forceinline char *hsl_show_flags(char *buf, size_t len, const char *delim
|
|||||||
#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
|
#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
|
||||||
#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
|
#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
|
||||||
#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
|
#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
|
||||||
/* 0x00000008 unused */
|
#define HTX_FL_ALTERED_PAYLOAD 0x00000008 /* The payload is altered, the extra value must not be trusted */
|
||||||
#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
|
#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
|
||||||
* (at worst, on the EOM block is missing)
|
* (at worst, on the EOM block is missing)
|
||||||
*/
|
*/
|
||||||
@ -265,12 +265,13 @@ struct htx {
|
|||||||
uint32_t head_addr; /* start address of the free space at the beginning */
|
uint32_t head_addr; /* start address of the free space at the beginning */
|
||||||
uint32_t end_addr; /* end address of the free space at the beginning */
|
uint32_t end_addr; /* end address of the free space at the beginning */
|
||||||
|
|
||||||
|
uint64_t extra; /* known bytes amount remaining to receive */
|
||||||
uint32_t flags; /* HTX_FL_* */
|
uint32_t flags; /* HTX_FL_* */
|
||||||
|
|
||||||
/* XXX 4 bytes unused */
|
/* XXX 4 bytes unused */
|
||||||
|
|
||||||
/* Blocks representing the HTTP message itself */
|
/* Blocks representing the HTTP message itself */
|
||||||
char blocks[VAR_ARRAY] ALIGNED(8);
|
char blocks[VAR_ARRAY] __attribute__((aligned(8)));
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _HAPROXY_HTX_T_H */
|
#endif /* _HAPROXY_HTX_T_H */
|
||||||
|
|||||||
@ -30,6 +30,11 @@
|
|||||||
#include <haproxy/http-t.h>
|
#include <haproxy/http-t.h>
|
||||||
#include <haproxy/htx-t.h>
|
#include <haproxy/htx-t.h>
|
||||||
|
|
||||||
|
/* ->extra field value when the payload length is unknown (non-chunked message
|
||||||
|
* with no "Content-length" header)
|
||||||
|
*/
|
||||||
|
#define HTX_UNKOWN_PAYLOAD_LENGTH ULLONG_MAX
|
||||||
|
|
||||||
extern struct htx htx_empty;
|
extern struct htx htx_empty;
|
||||||
|
|
||||||
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t info);
|
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t info);
|
||||||
@ -655,6 +660,7 @@ static inline void htx_reset(struct htx *htx)
|
|||||||
htx->tail = htx->head = htx->first = -1;
|
htx->tail = htx->head = htx->first = -1;
|
||||||
htx->data = 0;
|
htx->data = 0;
|
||||||
htx->tail_addr = htx->head_addr = htx->end_addr = 0;
|
htx->tail_addr = htx->head_addr = htx->end_addr = 0;
|
||||||
|
htx->extra = 0;
|
||||||
htx->flags = HTX_FL_NONE;
|
htx->flags = HTX_FL_NONE;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -694,6 +700,8 @@ static inline struct htx *htxbuf(const struct buffer *buf)
|
|||||||
htx->size = buf->size - sizeof(*htx);
|
htx->size = buf->size - sizeof(*htx);
|
||||||
htx_reset(htx);
|
htx_reset(htx);
|
||||||
}
|
}
|
||||||
|
if (htx->flags & HTX_FL_ALTERED_PAYLOAD)
|
||||||
|
htx->extra = 0;
|
||||||
return htx;
|
return htx;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -829,10 +837,10 @@ static inline void htx_dump(struct buffer *chunk, const struct htx *htx, int ful
|
|||||||
{
|
{
|
||||||
int32_t pos;
|
int32_t pos;
|
||||||
|
|
||||||
chunk_appendf(chunk, " htx=%p(size=%u,data=%u,used=%u,wrap=%s,flags=0x%08x,"
|
chunk_appendf(chunk, " htx=%p(size=%u,data=%u,used=%u,wrap=%s,flags=0x%08x,extra=%llu,"
|
||||||
"first=%d,head=%d,tail=%d,tail_addr=%d,head_addr=%d,end_addr=%d)",
|
"first=%d,head=%d,tail=%d,tail_addr=%d,head_addr=%d,end_addr=%d)",
|
||||||
htx, htx->size, htx->data, htx_nbblks(htx), (!htx->head_addr) ? "NO" : "YES",
|
htx, htx->size, htx->data, htx_nbblks(htx), (!htx->head_addr) ? "NO" : "YES",
|
||||||
htx->flags, htx->first, htx->head, htx->tail,
|
htx->flags, (unsigned long long)htx->extra, htx->first, htx->head, htx->tail,
|
||||||
htx->tail_addr, htx->head_addr, htx->end_addr);
|
htx->tail_addr, htx->head_addr, htx->end_addr);
|
||||||
|
|
||||||
if (!full || !htx_nbblks(htx))
|
if (!full || !htx_nbblks(htx))
|
||||||
|
|||||||
@ -14,7 +14,6 @@ extern struct list post_server_check_list;
|
|||||||
extern struct list per_thread_alloc_list;
|
extern struct list per_thread_alloc_list;
|
||||||
extern struct list per_thread_init_list;
|
extern struct list per_thread_init_list;
|
||||||
extern struct list post_deinit_list;
|
extern struct list post_deinit_list;
|
||||||
extern struct list post_deinit_master_list;
|
|
||||||
extern struct list proxy_deinit_list;
|
extern struct list proxy_deinit_list;
|
||||||
extern struct list server_deinit_list;
|
extern struct list server_deinit_list;
|
||||||
extern struct list per_thread_free_list;
|
extern struct list per_thread_free_list;
|
||||||
@ -25,7 +24,6 @@ void hap_register_post_check(int (*fct)());
|
|||||||
void hap_register_post_proxy_check(int (*fct)(struct proxy *));
|
void hap_register_post_proxy_check(int (*fct)(struct proxy *));
|
||||||
void hap_register_post_server_check(int (*fct)(struct server *));
|
void hap_register_post_server_check(int (*fct)(struct server *));
|
||||||
void hap_register_post_deinit(void (*fct)());
|
void hap_register_post_deinit(void (*fct)());
|
||||||
void hap_register_post_deinit_master(void (*fct)());
|
|
||||||
void hap_register_proxy_deinit(void (*fct)(struct proxy *));
|
void hap_register_proxy_deinit(void (*fct)(struct proxy *));
|
||||||
void hap_register_server_deinit(void (*fct)(struct server *));
|
void hap_register_server_deinit(void (*fct)(struct server *));
|
||||||
|
|
||||||
@ -65,10 +63,6 @@ void hap_register_unittest(const char *name, int (*fct)(int, char **));
|
|||||||
#define REGISTER_POST_DEINIT(fct) \
|
#define REGISTER_POST_DEINIT(fct) \
|
||||||
INITCALL1(STG_REGISTER, hap_register_post_deinit, (fct))
|
INITCALL1(STG_REGISTER, hap_register_post_deinit, (fct))
|
||||||
|
|
||||||
/* simplified way to declare a post-deinit (master process when launched in master/worker mode) callback in a file */
|
|
||||||
#define REGISTER_POST_DEINIT_MASTER(fct) \
|
|
||||||
INITCALL1(STG_REGISTER, hap_register_post_deinit_master, (fct))
|
|
||||||
|
|
||||||
/* simplified way to declare a proxy-deinit callback in a file */
|
/* simplified way to declare a proxy-deinit callback in a file */
|
||||||
#define REGISTER_PROXY_DEINIT(fct) \
|
#define REGISTER_PROXY_DEINIT(fct) \
|
||||||
INITCALL1(STG_REGISTER, hap_register_proxy_deinit, (fct))
|
INITCALL1(STG_REGISTER, hap_register_proxy_deinit, (fct))
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user