mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2026-04-02 09:31:01 +02:00
Compare commits
No commits in common. "master" and "v3.1-dev12" have entirely different histories.
master
...
v3.1-dev12
@ -1,7 +1,7 @@
|
|||||||
FreeBSD_task:
|
FreeBSD_task:
|
||||||
freebsd_instance:
|
freebsd_instance:
|
||||||
matrix:
|
matrix:
|
||||||
image_family: freebsd-14-3
|
image_family: freebsd-14-1
|
||||||
only_if: $CIRRUS_BRANCH =~ 'master|next'
|
only_if: $CIRRUS_BRANCH =~ 'master|next'
|
||||||
install_script:
|
install_script:
|
||||||
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
|
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
|
||||||
|
|||||||
34
.github/actions/setup-vtest/action.yml
vendored
34
.github/actions/setup-vtest/action.yml
vendored
@ -1,34 +0,0 @@
|
|||||||
name: 'setup VTest'
|
|
||||||
description: 'ssss'
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: "composite"
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Setup coredumps
|
|
||||||
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
sudo sysctl -w fs.suid_dumpable=1
|
|
||||||
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
|
|
||||||
|
|
||||||
- name: Setup ulimit for core dumps
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
ulimit -c unlimited
|
|
||||||
|
|
||||||
- name: Install VTest
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
scripts/build-vtest.sh
|
|
||||||
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
shell: bash
|
|
||||||
# This allows one to more easily see which tests fail.
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
6
.github/h2spec.config
vendored
6
.github/h2spec.config
vendored
@ -19,9 +19,9 @@ defaults
|
|||||||
|
|
||||||
frontend h2
|
frontend h2
|
||||||
mode http
|
mode http
|
||||||
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
|
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
|
||||||
default_backend h2b
|
default_backend h2
|
||||||
|
|
||||||
backend h2b
|
backend h2
|
||||||
errorfile 200 .github/errorfile
|
errorfile 200 .github/errorfile
|
||||||
http-request deny deny_status 200
|
http-request deny deny_status 200
|
||||||
|
|||||||
110
.github/matrix.py
vendored
110
.github/matrix.py
vendored
@ -12,7 +12,6 @@ import functools
|
|||||||
import json
|
import json
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
import urllib.error
|
|
||||||
import urllib.request
|
import urllib.request
|
||||||
from os import environ
|
from os import environ
|
||||||
from packaging import version
|
from packaging import version
|
||||||
@ -20,10 +19,9 @@ from packaging import version
|
|||||||
#
|
#
|
||||||
# this CI is used for both development and stable branches of HAProxy
|
# this CI is used for both development and stable branches of HAProxy
|
||||||
#
|
#
|
||||||
# naming convention used, if branch/tag name matches:
|
# naming convention used, if branch name matches:
|
||||||
#
|
#
|
||||||
# "haproxy-" - stable branches
|
# "haproxy-" - stable branches
|
||||||
# "vX.Y.Z" - release tags
|
|
||||||
# otherwise - development branch (i.e. "latest" ssl variants, "latest" github images)
|
# otherwise - development branch (i.e. "latest" ssl variants, "latest" github images)
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -34,24 +32,13 @@ def get_all_github_tags(url):
|
|||||||
headers = {}
|
headers = {}
|
||||||
if environ.get("GITHUB_TOKEN") is not None:
|
if environ.get("GITHUB_TOKEN") is not None:
|
||||||
headers["Authorization"] = "token {}".format(environ.get("GITHUB_TOKEN"))
|
headers["Authorization"] = "token {}".format(environ.get("GITHUB_TOKEN"))
|
||||||
all_tags = []
|
request = urllib.request.Request(url, headers=headers)
|
||||||
page = 1
|
|
||||||
sep = "&" if "?" in url else "?"
|
|
||||||
while True:
|
|
||||||
paginated_url = "{}{}per_page=100&page={}".format(url, sep, page)
|
|
||||||
request = urllib.request.Request(paginated_url, headers=headers)
|
|
||||||
try:
|
try:
|
||||||
response = urllib.request.urlopen(request)
|
tags = urllib.request.urlopen(request)
|
||||||
except urllib.error.URLError:
|
except:
|
||||||
return all_tags if all_tags else None
|
return None
|
||||||
tags = json.loads(response.read().decode("utf-8"))
|
tags = json.loads(tags.read().decode("utf-8"))
|
||||||
if not tags:
|
return [tag['name'] for tag in tags]
|
||||||
break
|
|
||||||
all_tags.extend([tag['name'] for tag in tags])
|
|
||||||
if len(tags) < 100:
|
|
||||||
break
|
|
||||||
page += 1
|
|
||||||
return all_tags if all_tags else None
|
|
||||||
|
|
||||||
@functools.lru_cache(5)
|
@functools.lru_cache(5)
|
||||||
def determine_latest_openssl(ssl):
|
def determine_latest_openssl(ssl):
|
||||||
@ -77,43 +64,9 @@ def determine_latest_aws_lc(ssl):
|
|||||||
if not tags:
|
if not tags:
|
||||||
return "AWS_LC_VERSION=failed_to_detect"
|
return "AWS_LC_VERSION=failed_to_detect"
|
||||||
valid_tags = list(filter(aws_lc_version_valid, tags))
|
valid_tags = list(filter(aws_lc_version_valid, tags))
|
||||||
if not valid_tags:
|
|
||||||
return "AWS_LC_VERSION=failed_to_detect"
|
|
||||||
latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
|
latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
|
||||||
return "AWS_LC_VERSION={}".format(latest_tag[1:])
|
return "AWS_LC_VERSION={}".format(latest_tag[1:])
|
||||||
|
|
||||||
def aws_lc_fips_version_string_to_num(version_string):
|
|
||||||
return tuple(map(int, version_string[12:].split('.')))
|
|
||||||
|
|
||||||
def aws_lc_fips_version_valid(version_string):
|
|
||||||
return re.match('^AWS-LC-FIPS-[0-9]+(\.[0-9]+)*$', version_string)
|
|
||||||
|
|
||||||
@functools.lru_cache(5)
|
|
||||||
def determine_latest_aws_lc_fips(ssl):
|
|
||||||
tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags")
|
|
||||||
if not tags:
|
|
||||||
return "AWS_LC_FIPS_VERSION=failed_to_detect"
|
|
||||||
valid_tags = list(filter(aws_lc_fips_version_valid, tags))
|
|
||||||
if not valid_tags:
|
|
||||||
return "AWS_LC_FIPS_VERSION=failed_to_detect"
|
|
||||||
latest_tag = max(valid_tags, key=aws_lc_fips_version_string_to_num)
|
|
||||||
return "AWS_LC_FIPS_VERSION={}".format(latest_tag[12:])
|
|
||||||
|
|
||||||
def wolfssl_version_string_to_num(version_string):
|
|
||||||
return tuple(map(int, version_string[1:].removesuffix('-stable').split('.')))
|
|
||||||
|
|
||||||
def wolfssl_version_valid(version_string):
|
|
||||||
return re.match('^v[0-9]+(\.[0-9]+)*-stable$', version_string)
|
|
||||||
|
|
||||||
@functools.lru_cache(5)
|
|
||||||
def determine_latest_wolfssl(ssl):
|
|
||||||
tags = get_all_github_tags("https://api.github.com/repos/wolfssl/wolfssl/tags")
|
|
||||||
if not tags:
|
|
||||||
return "WOLFSSL_VERSION=failed_to_detect"
|
|
||||||
valid_tags = list(filter(wolfssl_version_valid, tags))
|
|
||||||
latest_tag = max(valid_tags, key=wolfssl_version_string_to_num)
|
|
||||||
return "WOLFSSL_VERSION={}".format(latest_tag[1:].removesuffix('-stable'))
|
|
||||||
|
|
||||||
@functools.lru_cache(5)
|
@functools.lru_cache(5)
|
||||||
def determine_latest_libressl(ssl):
|
def determine_latest_libressl(ssl):
|
||||||
try:
|
try:
|
||||||
@ -136,18 +89,14 @@ def clean_compression(compression):
|
|||||||
def main(ref_name):
|
def main(ref_name):
|
||||||
print("Generating matrix for branch '{}'.".format(ref_name))
|
print("Generating matrix for branch '{}'.".format(ref_name))
|
||||||
|
|
||||||
is_stable = "haproxy-" in ref_name or re.match(r'^v\d+\.\d+\.\d+$', ref_name)
|
|
||||||
|
|
||||||
matrix = []
|
matrix = []
|
||||||
|
|
||||||
# Ubuntu
|
# Ubuntu
|
||||||
|
|
||||||
if is_stable:
|
if "haproxy-" in ref_name:
|
||||||
os = "ubuntu-24.04" # stable branch
|
os = "ubuntu-22.04" # stable branch
|
||||||
os_arm = "ubuntu-24.04-arm" # stable branch
|
|
||||||
else:
|
else:
|
||||||
os = "ubuntu-24.04" # development branch
|
os = "ubuntu-24.04" # development branch
|
||||||
os_arm = "ubuntu-24.04-arm" # development branch
|
|
||||||
|
|
||||||
TARGET = "linux-glibc"
|
TARGET = "linux-glibc"
|
||||||
for CC in ["gcc", "clang"]:
|
for CC in ["gcc", "clang"]:
|
||||||
@ -178,6 +127,7 @@ def main(ref_name):
|
|||||||
"USE_PCRE2_JIT=1",
|
"USE_PCRE2_JIT=1",
|
||||||
"USE_LUA=1",
|
"USE_LUA=1",
|
||||||
"USE_OPENSSL=1",
|
"USE_OPENSSL=1",
|
||||||
|
"USE_SYSTEMD=1",
|
||||||
"USE_WURFL=1",
|
"USE_WURFL=1",
|
||||||
"WURFL_INC=addons/wurfl/dummy",
|
"WURFL_INC=addons/wurfl/dummy",
|
||||||
"WURFL_LIB=addons/wurfl/dummy",
|
"WURFL_LIB=addons/wurfl/dummy",
|
||||||
@ -192,11 +142,10 @@ def main(ref_name):
|
|||||||
|
|
||||||
# ASAN
|
# ASAN
|
||||||
|
|
||||||
for os_asan in [os, os_arm]:
|
|
||||||
matrix.append(
|
matrix.append(
|
||||||
{
|
{
|
||||||
"name": "{}, {}, ASAN, all features".format(os_asan, CC),
|
"name": "{}, {}, ASAN, all features".format(os, CC),
|
||||||
"os": os_asan,
|
"os": os,
|
||||||
"TARGET": TARGET,
|
"TARGET": TARGET,
|
||||||
"CC": CC,
|
"CC": CC,
|
||||||
"FLAGS": [
|
"FLAGS": [
|
||||||
@ -212,6 +161,7 @@ def main(ref_name):
|
|||||||
"USE_PCRE2_JIT=1",
|
"USE_PCRE2_JIT=1",
|
||||||
"USE_LUA=1",
|
"USE_LUA=1",
|
||||||
"USE_OPENSSL=1",
|
"USE_OPENSSL=1",
|
||||||
|
"USE_SYSTEMD=1",
|
||||||
"USE_WURFL=1",
|
"USE_WURFL=1",
|
||||||
"WURFL_INC=addons/wurfl/dummy",
|
"WURFL_INC=addons/wurfl/dummy",
|
||||||
"WURFL_LIB=addons/wurfl/dummy",
|
"WURFL_LIB=addons/wurfl/dummy",
|
||||||
@ -239,14 +189,13 @@ def main(ref_name):
|
|||||||
"stock",
|
"stock",
|
||||||
"OPENSSL_VERSION=1.0.2u",
|
"OPENSSL_VERSION=1.0.2u",
|
||||||
"OPENSSL_VERSION=1.1.1s",
|
"OPENSSL_VERSION=1.1.1s",
|
||||||
"OPENSSL_VERSION=3.5.1",
|
"QUICTLS=yes",
|
||||||
"QUICTLS_VERSION=OpenSSL_1_1_1w-quic1",
|
|
||||||
"WOLFSSL_VERSION=5.7.0",
|
"WOLFSSL_VERSION=5.7.0",
|
||||||
"AWS_LC_VERSION=1.39.0",
|
"AWS_LC_VERSION=1.29.0",
|
||||||
# "BORINGSSL=yes",
|
# "BORINGSSL=yes",
|
||||||
]
|
]
|
||||||
|
|
||||||
if not is_stable: # development branch
|
if "haproxy-" not in ref_name: # development branch
|
||||||
ssl_versions = ssl_versions + [
|
ssl_versions = ssl_versions + [
|
||||||
"OPENSSL_VERSION=latest",
|
"OPENSSL_VERSION=latest",
|
||||||
"LIBRESSL_VERSION=latest",
|
"LIBRESSL_VERSION=latest",
|
||||||
@ -254,7 +203,8 @@ def main(ref_name):
|
|||||||
|
|
||||||
for ssl in ssl_versions:
|
for ssl in ssl_versions:
|
||||||
flags = ["USE_OPENSSL=1"]
|
flags = ["USE_OPENSSL=1"]
|
||||||
skipdup=0
|
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
|
||||||
|
flags.append("USE_QUIC=1")
|
||||||
if "WOLFSSL" in ssl:
|
if "WOLFSSL" in ssl:
|
||||||
flags.append("USE_OPENSSL_WOLFSSL=1")
|
flags.append("USE_OPENSSL_WOLFSSL=1")
|
||||||
if "AWS_LC" in ssl:
|
if "AWS_LC" in ssl:
|
||||||
@ -264,23 +214,8 @@ def main(ref_name):
|
|||||||
flags.append("SSL_INC=${HOME}/opt/include")
|
flags.append("SSL_INC=${HOME}/opt/include")
|
||||||
if "LIBRESSL" in ssl and "latest" in ssl:
|
if "LIBRESSL" in ssl and "latest" in ssl:
|
||||||
ssl = determine_latest_libressl(ssl)
|
ssl = determine_latest_libressl(ssl)
|
||||||
skipdup=1
|
|
||||||
if "OPENSSL" in ssl and "latest" in ssl:
|
if "OPENSSL" in ssl and "latest" in ssl:
|
||||||
ssl = determine_latest_openssl(ssl)
|
ssl = determine_latest_openssl(ssl)
|
||||||
skipdup=1
|
|
||||||
|
|
||||||
# if "latest" equals a version already in the list
|
|
||||||
if ssl in ssl_versions and skipdup == 1:
|
|
||||||
continue
|
|
||||||
|
|
||||||
openssl_supports_quic = False
|
|
||||||
try:
|
|
||||||
openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if ssl == "BORINGSSL=yes" or "QUICTLS" in ssl or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
|
|
||||||
flags.append("USE_QUIC=1")
|
|
||||||
|
|
||||||
matrix.append(
|
matrix.append(
|
||||||
{
|
{
|
||||||
@ -293,9 +228,12 @@ def main(ref_name):
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
# macOS on dev branches
|
# macOS
|
||||||
if not is_stable:
|
|
||||||
os = "macos-26" # development branch
|
if "haproxy-" in ref_name:
|
||||||
|
os = "macos-13" # stable branch
|
||||||
|
else:
|
||||||
|
os = "macos-15" # development branch
|
||||||
|
|
||||||
TARGET = "osx"
|
TARGET = "osx"
|
||||||
for CC in ["clang"]:
|
for CC in ["clang"]:
|
||||||
|
|||||||
12
.github/workflows/aws-lc-fips.yml
vendored
12
.github/workflows/aws-lc-fips.yml
vendored
@ -1,12 +0,0 @@
|
|||||||
name: AWS-LC-FIPS
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 0 * * 4"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
uses: ./.github/workflows/aws-lc-template.yml
|
|
||||||
with:
|
|
||||||
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
|
|
||||||
94
.github/workflows/aws-lc-template.yml
vendored
94
.github/workflows/aws-lc-template.yml
vendored
@ -1,94 +0,0 @@
|
|||||||
name: AWS-LC template
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
command:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Determine latest AWS-LC release
|
|
||||||
id: get_aws_lc_release
|
|
||||||
run: |
|
|
||||||
result=$(cd .github && python3 -c "${{ inputs.command }}")
|
|
||||||
echo $result
|
|
||||||
echo "result=$result" >> $GITHUB_OUTPUT
|
|
||||||
- name: Cache AWS-LC
|
|
||||||
id: cache_aws_lc
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: '~/opt/'
|
|
||||||
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb jose
|
|
||||||
- name: Install AWS-LC
|
|
||||||
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
|
||||||
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- uses: ./.github/actions/setup-vtest
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
- name: Show Unit-Tests results
|
|
||||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $result
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
58
.github/workflows/aws-lc.yml
vendored
58
.github/workflows/aws-lc.yml
vendored
@ -5,8 +5,62 @@ on:
|
|||||||
- cron: "0 0 * * 4"
|
- cron: "0 0 * * 4"
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
uses: ./.github/workflows/aws-lc-template.yml
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
|
- name: Determine latest AWS-LC release
|
||||||
|
id: get_aws_lc_release
|
||||||
|
run: |
|
||||||
|
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
|
||||||
|
echo $result
|
||||||
|
echo "result=$result" >> $GITHUB_OUTPUT
|
||||||
|
- name: Cache AWS-LC
|
||||||
|
id: cache_aws_lc
|
||||||
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
|
path: '~/opt/'
|
||||||
|
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
||||||
|
- name: Install AWS-LC
|
||||||
|
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||||
|
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
||||||
|
- name: Compile HAProxy
|
||||||
|
run: |
|
||||||
|
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||||
|
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
||||||
|
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||||
|
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||||
|
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||||
|
sudo make install
|
||||||
|
- name: Show HAProxy version
|
||||||
|
id: show-version
|
||||||
|
run: |
|
||||||
|
ldd $(which haproxy)
|
||||||
|
haproxy -vv
|
||||||
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
|
- name: Run VTest for HAProxy
|
||||||
|
id: vtest
|
||||||
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
|
- name: Show VTest results
|
||||||
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
run: |
|
||||||
|
for folder in ${TMPDIR}/haregtests-*/vtc.*; do
|
||||||
|
printf "::group::"
|
||||||
|
cat $folder/INFO
|
||||||
|
cat $folder/LOG
|
||||||
|
echo "::endgroup::"
|
||||||
|
done
|
||||||
|
exit 1
|
||||||
|
|||||||
7
.github/workflows/codespell.yml
vendored
7
.github/workflows/codespell.yml
vendored
@ -3,7 +3,6 @@ name: Spelling Check
|
|||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: "0 0 * * 2"
|
- cron: "0 0 * * 2"
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
@ -11,12 +10,12 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
codespell:
|
codespell:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- uses: codespell-project/codespell-problem-matcher@v1.2.0
|
- uses: codespell-project/codespell-problem-matcher@v1.2.0
|
||||||
- uses: codespell-project/actions-codespell@master
|
- uses: codespell-project/actions-codespell@master
|
||||||
with:
|
with:
|
||||||
skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
|
skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
|
||||||
ignore_words_list: pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
|
ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
|
||||||
uri_ignore_words_list: trafic,ressources
|
uri_ignore_words_list: trafic,ressources
|
||||||
|
|||||||
17
.github/workflows/compliance.yml
vendored
17
.github/workflows/compliance.yml
vendored
@ -11,10 +11,15 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
h2spec:
|
h2spec:
|
||||||
name: h2spec
|
name: h2spec
|
||||||
runs-on: ubuntu-latest
|
runs-on: ${{ matrix.os }}
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- TARGET: linux-glibc
|
||||||
|
CC: gcc
|
||||||
|
os: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install h2spec
|
- name: Install h2spec
|
||||||
id: install-h2spec
|
id: install-h2spec
|
||||||
run: |
|
run: |
|
||||||
@ -23,12 +28,12 @@ jobs:
|
|||||||
tar xvf h2spec.tar.gz
|
tar xvf h2spec.tar.gz
|
||||||
sudo install -m755 h2spec /usr/local/bin/h2spec
|
sudo install -m755 h2spec /usr/local/bin/h2spec
|
||||||
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
|
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
|
||||||
- name: Compile HAProxy with gcc
|
- name: Compile HAProxy with ${{ matrix.CC }}
|
||||||
run: |
|
run: |
|
||||||
make -j$(nproc) all \
|
make -j$(nproc) all \
|
||||||
ERR=1 \
|
ERR=1 \
|
||||||
TARGET=linux-glibc \
|
TARGET=${{ matrix.TARGET }} \
|
||||||
CC=gcc \
|
CC=${{ matrix.CC }} \
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||||
USE_OPENSSL=1
|
USE_OPENSSL=1
|
||||||
sudo make install
|
sudo make install
|
||||||
|
|||||||
5
.github/workflows/contrib.yml
vendored
5
.github/workflows/contrib.yml
vendored
@ -10,7 +10,10 @@ jobs:
|
|||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
- name: Compile admin/halog/halog
|
||||||
|
run: |
|
||||||
|
make admin/halog/halog
|
||||||
- name: Compile dev/flags/flags
|
- name: Compile dev/flags/flags
|
||||||
run: |
|
run: |
|
||||||
make dev/flags/flags
|
make dev/flags/flags
|
||||||
|
|||||||
8
.github/workflows/coverity.yml
vendored
8
.github/workflows/coverity.yml
vendored
@ -15,9 +15,9 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
scan:
|
scan:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
@ -27,7 +27,7 @@ jobs:
|
|||||||
libsystemd-dev
|
libsystemd-dev
|
||||||
- name: Install QUICTLS
|
- name: Install QUICTLS
|
||||||
run: |
|
run: |
|
||||||
QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
|
QUICTLS=yes scripts/build-ssl.sh
|
||||||
- name: Download Coverity build tool
|
- name: Download Coverity build tool
|
||||||
run: |
|
run: |
|
||||||
wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
|
wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
|
||||||
@ -38,7 +38,7 @@ jobs:
|
|||||||
- name: Build with Coverity build tool
|
- name: Build with Coverity build tool
|
||||||
run: |
|
run: |
|
||||||
export PATH=`pwd`/coverity_tool/bin:$PATH
|
export PATH=`pwd`/coverity_tool/bin:$PATH
|
||||||
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1
|
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_SYSTEMD=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
|
||||||
- name: Submit build result to Coverity Scan
|
- name: Submit build result to Coverity Scan
|
||||||
run: |
|
run: |
|
||||||
tar czvf cov.tar.gz cov-int
|
tar czvf cov.tar.gz cov-int
|
||||||
|
|||||||
7
.github/workflows/cross-zoo.yml
vendored
7
.github/workflows/cross-zoo.yml
vendored
@ -6,7 +6,6 @@ name: Cross Compile
|
|||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: "0 0 21 * *"
|
- cron: "0 0 21 * *"
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
@ -91,7 +90,7 @@ jobs:
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' }}
|
||||||
steps:
|
steps:
|
||||||
- name: install packages
|
- name: install packages
|
||||||
run: |
|
run: |
|
||||||
@ -99,12 +98,12 @@ jobs:
|
|||||||
sudo apt-get -yq --force-yes install \
|
sudo apt-get -yq --force-yes install \
|
||||||
gcc-${{ matrix.platform.arch }} \
|
gcc-${{ matrix.platform.arch }} \
|
||||||
${{ matrix.platform.libs }}
|
${{ matrix.platform.libs }}
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
|
||||||
- name: install quictls
|
- name: install quictls
|
||||||
run: |
|
run: |
|
||||||
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
|
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
40
.github/workflows/fedora-rawhide.yml
vendored
40
.github/workflows/fedora-rawhide.yml
vendored
@ -1,9 +1,8 @@
|
|||||||
name: Fedora/Rawhide/OpenSSL
|
name: Fedora/Rawhide/QuicTLS
|
||||||
|
|
||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: "0 0 25 * *"
|
- cron: "0 0 25 * *"
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
@ -13,24 +12,26 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
platform: [
|
platform: [
|
||||||
{ name: x64, cc: gcc, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
{ name: x64, cc: gcc, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||||
{ name: x64, cc: clang, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
{ name: x64, cc: clang, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||||
{ name: x86, cc: gcc, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
||||||
{ name: x86, cc: clang, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
||||||
]
|
]
|
||||||
fail-fast: false
|
|
||||||
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' }}
|
||||||
container:
|
container:
|
||||||
image: fedora:rawhide
|
image: fedora:rawhide
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64
|
dnf -y install diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
|
||||||
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 openssl-devel.i686
|
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install VTest
|
||||||
|
run: scripts/build-vtest.sh
|
||||||
|
- name: Install QuicTLS
|
||||||
|
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
|
||||||
- name: Build contrib tools
|
- name: Build contrib tools
|
||||||
run: |
|
run: |
|
||||||
make admin/halog/halog
|
make admin/halog/halog
|
||||||
@ -39,7 +40,7 @@ jobs:
|
|||||||
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
||||||
- name: Compile HAProxy with ${{ matrix.platform.cc }}
|
- name: Compile HAProxy with ${{ matrix.platform.cc }}
|
||||||
run: |
|
run: |
|
||||||
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_SYSTEMD=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
||||||
make install
|
make install
|
||||||
- name: Show HAProxy version
|
- name: Show HAProxy version
|
||||||
id: show-version
|
id: show-version
|
||||||
@ -49,13 +50,6 @@ jobs:
|
|||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
#
|
|
||||||
# TODO: review this workaround later
|
|
||||||
- name: relax crypto policies
|
|
||||||
run: |
|
|
||||||
dnf -y install crypto-policies-scripts
|
|
||||||
echo LEGACY > /etc/crypto-policies/config
|
|
||||||
update-crypto-policies
|
|
||||||
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
@ -63,13 +57,9 @@ jobs:
|
|||||||
- name: Show VTest results
|
- name: Show VTest results
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
run: |
|
run: |
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
for folder in ${TMPDIR}/haregtests-*/vtc.*; do
|
||||||
printf "::group::"
|
printf "::group::"
|
||||||
cat $folder/INFO
|
cat $folder/INFO
|
||||||
cat $folder/LOG
|
cat $folder/LOG
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
done
|
done
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
|
|||||||
5
.github/workflows/illumos.yml
vendored
5
.github/workflows/illumos.yml
vendored
@ -3,17 +3,16 @@ name: Illumos
|
|||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: "0 0 25 * *"
|
- cron: "0 0 25 * *"
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
gcc:
|
gcc:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- name: "Checkout repository"
|
- name: "Checkout repository"
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: "Build on VM"
|
- name: "Build on VM"
|
||||||
uses: vmactions/solaris-vm@v1
|
uses: vmactions/solaris-vm@v1
|
||||||
|
|||||||
20
.github/workflows/musl.yml
vendored
20
.github/workflows/musl.yml
vendored
@ -20,13 +20,13 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
ulimit -c unlimited
|
ulimit -c unlimited
|
||||||
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
|
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
|
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
|
||||||
- name: Install VTest
|
- name: Install VTest
|
||||||
run: scripts/build-vtest.sh
|
run: scripts/build-vtest.sh
|
||||||
- name: Build
|
- name: Build
|
||||||
run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
|
run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
|
||||||
- name: Show version
|
- name: Show version
|
||||||
run: ./haproxy -vv
|
run: ./haproxy -vv
|
||||||
- name: Show linked libraries
|
- name: Show linked libraries
|
||||||
@ -37,10 +37,6 @@ jobs:
|
|||||||
- name: Run VTest
|
- name: Run VTest
|
||||||
id: vtest
|
id: vtest
|
||||||
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
- name: Show coredumps
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
run: |
|
run: |
|
||||||
@ -64,13 +60,3 @@ jobs:
|
|||||||
cat $folder/LOG
|
cat $folder/LOG
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
done
|
done
|
||||||
- name: Show Unit-Tests results
|
|
||||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $result
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
|
|||||||
5
.github/workflows/netbsd.yml
vendored
5
.github/workflows/netbsd.yml
vendored
@ -3,17 +3,16 @@ name: NetBSD
|
|||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: "0 0 25 * *"
|
- cron: "0 0 25 * *"
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
gcc:
|
gcc:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- name: "Checkout repository"
|
- name: "Checkout repository"
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: "Build on VM"
|
- name: "Build on VM"
|
||||||
uses: vmactions/netbsd-vm@v1
|
uses: vmactions/netbsd-vm@v1
|
||||||
|
|||||||
82
.github/workflows/openssl-ech.yml
vendored
82
.github/workflows/openssl-ech.yml
vendored
@ -1,82 +0,0 @@
|
|||||||
name: openssl ECH
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install VTest
|
|
||||||
run: |
|
|
||||||
scripts/build-vtest.sh
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
|
||||||
sudo apt-get --no-install-recommends -y install libpsl-dev
|
|
||||||
- name: Install OpenSSL+ECH
|
|
||||||
run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
|
|
||||||
- name: Install curl+ECH
|
|
||||||
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
|
||||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
# allow to catch coredumps
|
|
||||||
ulimit -c unlimited
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
77
.github/workflows/openssl-master.yml
vendored
77
.github/workflows/openssl-master.yml
vendored
@ -1,77 +0,0 @@
|
|||||||
name: openssl master
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
|
||||||
sudo apt-get --no-install-recommends -y install libpsl-dev
|
|
||||||
- uses: ./.github/actions/setup-vtest
|
|
||||||
- name: Install OpenSSL master
|
|
||||||
run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_QUIC=1 USE_OPENSSL=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
# allow to catch coredumps
|
|
||||||
ulimit -c unlimited
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
33
.github/workflows/openssl-nodeprecated.yml
vendored
Normal file
33
.github/workflows/openssl-nodeprecated.yml
vendored
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
#
|
||||||
|
# special purpose CI: test against OpenSSL built in "no-deprecated" mode
|
||||||
|
# let us run those builds weekly
|
||||||
|
#
|
||||||
|
# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
|
||||||
|
# GH: https://github.com/haproxy/haproxy/issues/367
|
||||||
|
|
||||||
|
name: openssl no-deprecated
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "0 0 * * 4"
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
|
- name: Compile HAProxy
|
||||||
|
run: |
|
||||||
|
make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
|
||||||
|
- name: Run VTest
|
||||||
|
run: |
|
||||||
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
66
.github/workflows/quic-interop-aws-lc.yml
vendored
66
.github/workflows/quic-interop-aws-lc.yml
vendored
@ -11,57 +11,83 @@ on:
|
|||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
combined-build-and-run:
|
build:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Update Docker to the latest
|
- name: Log in to the Container registry
|
||||||
uses: docker/setup-docker-action@v4
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build Docker image
|
- name: Build and push Docker image
|
||||||
id: push
|
id: push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
platforms: linux/amd64
|
push: true
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=AWS-LC
|
SSLLIB: AWS-LC
|
||||||
tags: local:aws-lc
|
tags: ghcr.io/${{ github.repository }}:aws-lc
|
||||||
|
|
||||||
|
run:
|
||||||
|
needs: build
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
suite: [
|
||||||
|
{ client: chrome, tests: "http3" },
|
||||||
|
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
|
||||||
|
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
|
||||||
|
{ client: ngtcp2, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" }
|
||||||
|
]
|
||||||
|
fail-fast: false
|
||||||
|
|
||||||
|
name: ${{ matrix.suite.client }}
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Log in to the Container registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Install tshark
|
- name: Install tshark
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get -y install tshark
|
sudo apt-get -y install tshark
|
||||||
|
|
||||||
|
- name: Pull image
|
||||||
|
run: |
|
||||||
|
docker pull ghcr.io/${{ github.repository }}:aws-lc
|
||||||
|
|
||||||
- name: Run
|
- name: Run
|
||||||
run: |
|
run: |
|
||||||
git clone https://github.com/quic-interop/quic-interop-runner
|
git clone https://github.com/quic-interop/quic-interop-runner
|
||||||
cd quic-interop-runner
|
cd quic-interop-runner
|
||||||
pip install -r requirements.txt --break-system-packages
|
pip install -r requirements.txt --break-system-packages
|
||||||
python run.py -j result.json -l logs-chrome -r haproxy=local:aws-lc -t "http3" -c chrome -s haproxy
|
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:aws-lc -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
|
||||||
python run.py -j result.json -l logs-picoquic -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c picoquic -s haproxy
|
|
||||||
python run.py -j result.json -l logs-quic-go -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c quic-go -s haproxy
|
|
||||||
python run.py -j result.json -l logs-ngtcp2 -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c ngtcp2 -s haproxy
|
|
||||||
|
|
||||||
- name: Delete succeeded logs
|
- name: Delete succeeded logs
|
||||||
if: failure()
|
if: failure()
|
||||||
run: |
|
run: |
|
||||||
for client in chrome picoquic quic-go ngtcp2; do
|
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
|
||||||
pushd quic-interop-runner/logs-${client}/haproxy_${client}
|
|
||||||
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
||||||
popd
|
|
||||||
done
|
|
||||||
|
|
||||||
- name: Logs upload
|
- name: Logs upload
|
||||||
if: failure()
|
if: failure()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: logs
|
name: logs-${{ matrix.suite.client }}
|
||||||
path: quic-interop-runner/logs*/
|
path: quic-interop-runner/logs/
|
||||||
retention-days: 6
|
retention-days: 6
|
||||||
|
|||||||
62
.github/workflows/quic-interop-libressl.yml
vendored
62
.github/workflows/quic-interop-libressl.yml
vendored
@ -11,55 +11,81 @@ on:
|
|||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
combined-build-and-run:
|
build:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Update Docker to the latest
|
- name: Log in to the Container registry
|
||||||
uses: docker/setup-docker-action@v4
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build Docker image
|
- name: Build and push Docker image
|
||||||
id: push
|
id: push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
platforms: linux/amd64
|
push: true
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=LibreSSL
|
SSLLIB: LibreSSL
|
||||||
tags: local:libressl
|
tags: ghcr.io/${{ github.repository }}:libressl
|
||||||
|
|
||||||
|
run:
|
||||||
|
needs: build
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
suite: [
|
||||||
|
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
|
||||||
|
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
|
||||||
|
]
|
||||||
|
fail-fast: false
|
||||||
|
|
||||||
|
name: ${{ matrix.suite.client }}
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Log in to the Container registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Install tshark
|
- name: Install tshark
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get -y install tshark
|
sudo apt-get -y install tshark
|
||||||
|
|
||||||
|
- name: Pull image
|
||||||
|
run: |
|
||||||
|
docker pull ghcr.io/${{ github.repository }}:libressl
|
||||||
|
|
||||||
- name: Run
|
- name: Run
|
||||||
run: |
|
run: |
|
||||||
git clone https://github.com/quic-interop/quic-interop-runner
|
git clone https://github.com/quic-interop/quic-interop-runner
|
||||||
cd quic-interop-runner
|
cd quic-interop-runner
|
||||||
pip install -r requirements.txt --break-system-packages
|
pip install -r requirements.txt --break-system-packages
|
||||||
python run.py -j result.json -l logs-picoquic -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" -c picoquic -s haproxy
|
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:libressl -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
|
||||||
python run.py -j result.json -l logs-quic-go -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" -c quic-go -s haproxy
|
|
||||||
|
|
||||||
- name: Delete succeeded logs
|
- name: Delete succeeded logs
|
||||||
if: failure()
|
if: failure()
|
||||||
run: |
|
run: |
|
||||||
for client in picoquic quic-go; do
|
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
|
||||||
pushd quic-interop-runner/logs-${client}/haproxy_${client}
|
|
||||||
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
||||||
popd
|
|
||||||
done
|
|
||||||
|
|
||||||
- name: Logs upload
|
- name: Logs upload
|
||||||
if: failure()
|
if: failure()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: logs
|
name: logs-${{ matrix.suite.client }}
|
||||||
path: quic-interop-runner/logs*/
|
path: quic-interop-runner/logs/
|
||||||
retention-days: 6
|
retention-days: 6
|
||||||
|
|||||||
74
.github/workflows/quictls.yml
vendored
74
.github/workflows/quictls.yml
vendored
@ -1,74 +0,0 @@
|
|||||||
#
|
|
||||||
# weekly run against modern QuicTLS branch, i.e. https://github.com/quictls/quictls
|
|
||||||
#
|
|
||||||
|
|
||||||
name: QuicTLS
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 0 * * 4"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
|
||||||
- name: Install QuicTLS
|
|
||||||
run: env QUICTLS_VERSION=main QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_QUIC=1 USE_OPENSSL=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
|
||||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- uses: ./.github/actions/setup-vtest
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
65
.github/workflows/vtest.yml
vendored
65
.github/workflows/vtest.yml
vendored
@ -23,7 +23,7 @@ jobs:
|
|||||||
outputs:
|
outputs:
|
||||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Generate Build Matrix
|
- name: Generate Build Matrix
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
@ -44,10 +44,16 @@ jobs:
|
|||||||
TMPDIR: /tmp
|
TMPDIR: /tmp
|
||||||
OT_CPP_VERSION: 1.6.0
|
OT_CPP_VERSION: 1.6.0
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 100
|
fetch-depth: 100
|
||||||
|
|
||||||
|
- name: Setup coredumps
|
||||||
|
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
||||||
|
run: |
|
||||||
|
sudo sysctl -w fs.suid_dumpable=1
|
||||||
|
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
|
||||||
|
|
||||||
#
|
#
|
||||||
# Github Action cache key cannot contain comma, so we calculate it based on job name
|
# Github Action cache key cannot contain comma, so we calculate it based on job name
|
||||||
#
|
#
|
||||||
@ -57,7 +63,7 @@ jobs:
|
|||||||
echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
|
echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Cache SSL libs
|
- name: Cache SSL libs
|
||||||
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && !contains(matrix.ssl, 'QUICTLS') }}
|
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
|
||||||
id: cache_ssl
|
id: cache_ssl
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
@ -70,7 +76,7 @@ jobs:
|
|||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
path: '~/opt-ot/'
|
path: '~/opt-ot/'
|
||||||
key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
|
key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
||||||
run: |
|
run: |
|
||||||
@ -80,14 +86,15 @@ jobs:
|
|||||||
${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
|
${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
|
||||||
${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
|
${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
|
||||||
socat \
|
socat \
|
||||||
gdb \
|
gdb
|
||||||
jose
|
|
||||||
- name: Install brew dependencies
|
- name: Install brew dependencies
|
||||||
if: ${{ startsWith(matrix.os, 'macos-') }}
|
if: ${{ startsWith(matrix.os, 'macos-') }}
|
||||||
run: |
|
run: |
|
||||||
brew install socat
|
brew install socat
|
||||||
brew install lua
|
brew install lua
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Install SSL ${{ matrix.ssl }}
|
- name: Install SSL ${{ matrix.ssl }}
|
||||||
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
|
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||||
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
|
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
|
||||||
@ -110,19 +117,10 @@ jobs:
|
|||||||
ERR=1 \
|
ERR=1 \
|
||||||
TARGET=${{ matrix.TARGET }} \
|
TARGET=${{ matrix.TARGET }} \
|
||||||
CC=${{ matrix.CC }} \
|
CC=${{ matrix.CC }} \
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||||
${{ join(matrix.FLAGS, ' ') }} \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
sudo make install-bin
|
|
||||||
- name: Compile admin/halog/halog
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) admin/halog/halog \
|
|
||||||
ERR=1 \
|
|
||||||
TARGET=${{ matrix.TARGET }} \
|
|
||||||
CC=${{ matrix.CC }} \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
${{ join(matrix.FLAGS, ' ') }} \
|
${{ join(matrix.FLAGS, ' ') }} \
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||||
|
sudo make install
|
||||||
- name: Show HAProxy version
|
- name: Show HAProxy version
|
||||||
id: show-version
|
id: show-version
|
||||||
run: |
|
run: |
|
||||||
@ -137,33 +135,36 @@ jobs:
|
|||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
|
- name: Install problem matcher for VTest
|
||||||
|
# This allows one to more easily see which tests fail.
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
|
- name: Config syntax check memleak smoke testing
|
||||||
|
if: ${{ contains(matrix.name, 'ASAN') }}
|
||||||
|
run: |
|
||||||
|
./haproxy -dI -f .github/h2spec.config -c
|
||||||
|
./haproxy -dI -f examples/content-sw-sample.cfg -c
|
||||||
|
./haproxy -dI -f examples/option-http_proxy.cfg -c
|
||||||
|
./haproxy -dI -f examples/quick-test.cfg -c
|
||||||
|
./haproxy -dI -f examples/transparent_proxy.cfg -c
|
||||||
- name: Show VTest results
|
- name: Show VTest results
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
run: |
|
run: |
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
for folder in ${TMPDIR}/haregtests-*/vtc.*; do
|
||||||
printf "::group::"
|
printf "::group::"
|
||||||
cat $folder/INFO
|
cat $folder/INFO
|
||||||
cat $folder/LOG
|
cat $folder/LOG
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
done
|
done
|
||||||
exit 1
|
exit 1
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show Unit-Tests results
|
|
||||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $result
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Show coredumps
|
- name: Show coredumps
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
3
.github/workflows/windows.yml
vendored
3
.github/workflows/windows.yml
vendored
@ -18,7 +18,6 @@ jobs:
|
|||||||
msys2:
|
msys2:
|
||||||
name: ${{ matrix.name }}
|
name: ${{ matrix.name }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: msys2 {0}
|
shell: msys2 {0}
|
||||||
@ -36,7 +35,7 @@ jobs:
|
|||||||
- USE_THREAD=1
|
- USE_THREAD=1
|
||||||
- USE_ZLIB=1
|
- USE_ZLIB=1
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- uses: msys2/setup-msys2@v2
|
- uses: msys2/setup-msys2@v2
|
||||||
with:
|
with:
|
||||||
install: >-
|
install: >-
|
||||||
|
|||||||
80
.github/workflows/wolfssl.yml
vendored
80
.github/workflows/wolfssl.yml
vendored
@ -1,80 +0,0 @@
|
|||||||
name: WolfSSL
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 0 * * 4"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb jose
|
|
||||||
- name: Install WolfSSL
|
|
||||||
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
|
||||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- uses: ./.github/actions/setup-vtest
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
- name: Show Unit-Tests results
|
|
||||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $result
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
1
.gitignore
vendored
1
.gitignore
vendored
@ -57,4 +57,3 @@ dev/udp/udp-perturb
|
|||||||
/src/dlmalloc.c
|
/src/dlmalloc.c
|
||||||
/tests/test_hashes
|
/tests/test_hashes
|
||||||
doc/lua-api/_build
|
doc/lua-api/_build
|
||||||
dev/term_events/term_events
|
|
||||||
|
|||||||
@ -8,7 +8,7 @@ branches:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
global:
|
global:
|
||||||
- FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_ZLIB=1"
|
- FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_SYSTEMD=1 USE_ZLIB=1"
|
||||||
- TMPDIR=/tmp
|
- TMPDIR=/tmp
|
||||||
|
|
||||||
addons:
|
addons:
|
||||||
|
|||||||
12
BRANCHES
12
BRANCHES
@ -171,17 +171,7 @@ feedback for developers:
|
|||||||
as the previous releases that had 6 months to stabilize. In terms of
|
as the previous releases that had 6 months to stabilize. In terms of
|
||||||
stability it really means that the point zero version already accumulated
|
stability it really means that the point zero version already accumulated
|
||||||
6 months of fixes and that it is much safer to use even just after it is
|
6 months of fixes and that it is much safer to use even just after it is
|
||||||
released. There is one exception though, features marked as "experimental"
|
released.
|
||||||
are not guaranteed to be maintained beyond the release of the next LTS
|
|
||||||
branch. The rationale here is that the experimental status is made to
|
|
||||||
expose an early preview of a feature, that is often incomplete, not always
|
|
||||||
in its definitive form regarding configuration, and for which developers
|
|
||||||
are seeking feedback from the users. It is even possible that changes will
|
|
||||||
be brought within the stable branch and it may happen that the feature
|
|
||||||
breaks. It is not imaginable to always be able to backport bug fixes too
|
|
||||||
far in this context since the code and configuration may change quite a
|
|
||||||
bit. Users who want to try experimental features are expected to upgrade
|
|
||||||
quickly to benefit from the improvements made to that feature.
|
|
||||||
|
|
||||||
- for developers, given that the odd versions are solely used by highly
|
- for developers, given that the odd versions are solely used by highly
|
||||||
skilled users, it's easier to get advanced traces and captures, and there
|
skilled users, it's easier to get advanced traces and captures, and there
|
||||||
|
|||||||
@ -1010,7 +1010,7 @@ you notice you're already practising some of them:
|
|||||||
- continue to send pull requests after having been explained why they are not
|
- continue to send pull requests after having been explained why they are not
|
||||||
welcome.
|
welcome.
|
||||||
|
|
||||||
- give wrong advice to people asking for help, or sending them patches to
|
- give wrong advices to people asking for help, or sending them patches to
|
||||||
try which make no sense, waste their time, and give them a bad impression
|
try which make no sense, waste their time, and give them a bad impression
|
||||||
of the people working on the project.
|
of the people working on the project.
|
||||||
|
|
||||||
|
|||||||
83
INSTALL
83
INSTALL
@ -111,22 +111,20 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
|
|||||||
may want to retry with "gmake" which is the name commonly used for GNU make
|
may want to retry with "gmake" which is the name commonly used for GNU make
|
||||||
on BSD systems.
|
on BSD systems.
|
||||||
|
|
||||||
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
|
- GCC >= 4.2 (up to 13 tested). Older versions can be made to work with a
|
||||||
the latest mt_list update which only uses c11-like atomics. Newer versions
|
few minor adaptations if really needed. Newer versions may sometimes break
|
||||||
may sometimes break due to compiler regressions or behaviour changes. The
|
due to compiler regressions or behaviour changes. The version shipped with
|
||||||
version shipped with your operating system is very likely to work with no
|
your operating system is very likely to work with no trouble. Clang >= 3.0
|
||||||
trouble. Clang >= 3.0 is also known to work as an alternative solution, and
|
is also known to work as an alternative solution. Recent versions may emit
|
||||||
versions up to 19 were successfully tested. Recent versions may emit a bit
|
a bit more warnings that are worth reporting as they may reveal real bugs.
|
||||||
more warnings that are worth reporting as they may reveal real bugs. TCC
|
TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
|
||||||
(https://repo.or.cz/tinycc.git) is also usable for developers but will not
|
not support threading and was found at least once to produce bad code in
|
||||||
support threading and was found at least once to produce bad code in some
|
some rare corner cases (since fixed). But it builds extremely quickly
|
||||||
rare corner cases (since fixed). But it builds extremely quickly (typically
|
(typically half a second for the whole project) and is very convenient to
|
||||||
half a second for the whole project) and is very convenient to run quick
|
run quick tests during API changes or code refactoring.
|
||||||
tests during API changes or code refactoring.
|
|
||||||
|
|
||||||
- GNU ld (binutils package), with no particular version. Other linkers might
|
- GNU ld (binutils package), with no particular version. Other linkers might
|
||||||
work but were not tested. The default one from your operating system will
|
work but were not tested.
|
||||||
normally work.
|
|
||||||
|
|
||||||
On debian or Ubuntu systems and their derivatives, you may get all these tools
|
On debian or Ubuntu systems and their derivatives, you may get all these tools
|
||||||
at once by issuing the two following commands :
|
at once by issuing the two following commands :
|
||||||
@ -237,7 +235,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
|
|||||||
-----------------
|
-----------------
|
||||||
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
||||||
supports the OpenSSL library, and is known to build and work with branches
|
supports the OpenSSL library, and is known to build and work with branches
|
||||||
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
|
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.3. It is recommended to use
|
||||||
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
||||||
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
||||||
and each of the branches above receives its own fixes, without forcing you to
|
and each of the branches above receives its own fixes, without forcing you to
|
||||||
@ -259,15 +257,11 @@ reported to work as well. While there are some efforts from the community to
|
|||||||
ensure they work well, OpenSSL remains the primary target and this means that
|
ensure they work well, OpenSSL remains the primary target and this means that
|
||||||
in case of conflicting choices, OpenSSL support will be favored over other
|
in case of conflicting choices, OpenSSL support will be favored over other
|
||||||
options. Note that QUIC is not fully supported when haproxy is built with
|
options. Note that QUIC is not fully supported when haproxy is built with
|
||||||
OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
|
OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
|
||||||
alternatives. As of writing this, the QuicTLS project follows OpenSSL very
|
this, the QuicTLS project follows OpenSSL very closely and provides update
|
||||||
closely and provides update simultaneously, but being a volunteer-driven
|
simultaneously, but being a volunteer-driven project, its long-term future does
|
||||||
project, its long-term future does not look certain enough to convince
|
not look certain enough to convince operating systems to package it, so it
|
||||||
operating systems to package it, so it needs to be build locally. Recent
|
needs to be build locally. See the section about QUIC in this document.
|
||||||
versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
|
|
||||||
generally more performant than other OpenSSL derivatives, but may behave
|
|
||||||
slightly differently, particularly when dealing with outdated setups. See
|
|
||||||
the section about QUIC in this document.
|
|
||||||
|
|
||||||
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
|
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
|
||||||
supported alternative stack not based on OpenSSL, yet which implements almost
|
supported alternative stack not based on OpenSSL, yet which implements almost
|
||||||
@ -471,6 +465,12 @@ are the extra libraries that may be referenced at build time :
|
|||||||
on Linux. It is automatically detected and may be disabled
|
on Linux. It is automatically detected and may be disabled
|
||||||
using "USE_DL=", though it should never harm.
|
using "USE_DL=", though it should never harm.
|
||||||
|
|
||||||
|
- USE_SYSTEMD=1 enables support for the sdnotify features of systemd,
|
||||||
|
allowing better integration with systemd on Linux systems
|
||||||
|
which come with it. It is never enabled by default so there
|
||||||
|
is no need to disable it.
|
||||||
|
|
||||||
|
|
||||||
4.10) Common errors
|
4.10) Common errors
|
||||||
-------------------
|
-------------------
|
||||||
Some build errors may happen depending on the options combinations or the
|
Some build errors may happen depending on the options combinations or the
|
||||||
@ -494,8 +494,8 @@ target. Common issues may include:
|
|||||||
other supported compatible library.
|
other supported compatible library.
|
||||||
|
|
||||||
- many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
|
- many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
|
||||||
=> these warnings happen on old compilers (typically gcc before 7.x),
|
=> these warnings happen on old compilers (typically gcc-4.4), and may
|
||||||
and may safely be ignored; newer ones are better on these.
|
safely be ignored; newer ones are better on these.
|
||||||
|
|
||||||
|
|
||||||
4.11) QUIC
|
4.11) QUIC
|
||||||
@ -504,11 +504,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
|
|||||||
protocol stack is currently supported as an experimental feature in haproxy on
|
protocol stack is currently supported as an experimental feature in haproxy on
|
||||||
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
|
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
|
||||||
|
|
||||||
Note that QUIC is not always fully supported by the OpenSSL library depending on
|
Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
|
||||||
its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before
|
cannot be supported by OpenSSL contrary to others libraries with full QUIC
|
||||||
3.5 contrary to others libraries with full QUIC support. The preferred option is
|
support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
|
||||||
to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its
|
a QUIC-compatible API. Its repository is available at this location:
|
||||||
repository is available at this location:
|
|
||||||
|
|
||||||
https://github.com/quictls/openssl
|
https://github.com/quictls/openssl
|
||||||
|
|
||||||
@ -536,18 +535,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
|
|||||||
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
|
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
|
||||||
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
|
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
|
||||||
|
|
||||||
As last resort, haproxy may be compiled against OpenSSL as follows from 3.5
|
As last resort, haproxy may be compiled against OpenSSL as follows:
|
||||||
version with 0-RTT support:
|
|
||||||
|
|
||||||
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
|
|
||||||
|
|
||||||
or as follows for all OpenSSL versions but without O-RTT support:
|
|
||||||
|
|
||||||
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
|
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
|
||||||
|
|
||||||
In addition to this requirements, the QUIC listener bindings must be explicitly
|
Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
|
||||||
enabled with a specific QUIC tuning parameter. (see "limited-quic" global
|
OpenSSL. In addition to this compilation requirements, the QUIC listener
|
||||||
parameter of haproxy Configuration Manual).
|
bindings must be explicitly enabled with a specific QUIC tuning parameter.
|
||||||
|
(see "limited-quic" global parameter of haproxy Configuration Manual).
|
||||||
|
|
||||||
|
|
||||||
5) How to build HAProxy
|
5) How to build HAProxy
|
||||||
@ -563,9 +558,9 @@ It goes into more details with the main options.
|
|||||||
To build haproxy, you have to choose your target OS amongst the following ones
|
To build haproxy, you have to choose your target OS amongst the following ones
|
||||||
and assign it to the TARGET variable :
|
and assign it to the TARGET variable :
|
||||||
|
|
||||||
- linux-glibc for Linux kernel 4.17 and above
|
- linux-glibc for Linux kernel 2.6.28 and above
|
||||||
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
||||||
- linux-musl for Linux kernel 4.17 and above with musl libc
|
- linux-musl for Linux kernel 2.6.28 and above with musl libc
|
||||||
- solaris for Solaris 10 and above
|
- solaris for Solaris 10 and above
|
||||||
- freebsd for FreeBSD 10 and above
|
- freebsd for FreeBSD 10 and above
|
||||||
- dragonfly for DragonFlyBSD 4.3 and above
|
- dragonfly for DragonFlyBSD 4.3 and above
|
||||||
@ -765,8 +760,8 @@ forced to produce final binaries, and must not be used during bisect sessions,
|
|||||||
as it will often lead to the wrong commit.
|
as it will often lead to the wrong commit.
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
# silence strict-aliasing warnings with old gcc-5.5:
|
# silence strict-aliasing warnings with old gcc-4.4:
|
||||||
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-55 CFLAGS=-fno-strict-aliasing
|
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-44 CFLAGS=-fno-strict-aliasing
|
||||||
|
|
||||||
# disable all warning options:
|
# disable all warning options:
|
||||||
$ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=
|
$ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=
|
||||||
|
|||||||
226
Makefile
226
Makefile
@ -35,7 +35,6 @@
|
|||||||
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
|
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
|
||||||
# USE_OPENSSL_AWSLC : enable use of AWS-LC
|
# USE_OPENSSL_AWSLC : enable use of AWS-LC
|
||||||
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
|
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
|
||||||
# USE_ECH : enable use of ECH with the OpenSSL API
|
|
||||||
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
|
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
|
||||||
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
|
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
|
||||||
# USE_ENGINE : enable use of OpenSSL Engine.
|
# USE_ENGINE : enable use of OpenSSL Engine.
|
||||||
@ -57,14 +56,14 @@
|
|||||||
# USE_DEVICEATLAS : enable DeviceAtlas api.
|
# USE_DEVICEATLAS : enable DeviceAtlas api.
|
||||||
# USE_51DEGREES : enable third party device detection library from 51Degrees
|
# USE_51DEGREES : enable third party device detection library from 51Degrees
|
||||||
# USE_WURFL : enable WURFL detection library from Scientiamobile
|
# USE_WURFL : enable WURFL detection library from Scientiamobile
|
||||||
|
# USE_SYSTEMD : enable sd_notify() support.
|
||||||
# USE_OBSOLETE_LINKER : use when the linker fails to emit __start_init/__stop_init
|
# USE_OBSOLETE_LINKER : use when the linker fails to emit __start_init/__stop_init
|
||||||
# USE_THREAD_DUMP : use the more advanced thread state dump system. Automatic.
|
# USE_THREAD_DUMP : use the more advanced thread state dump system. Automatic.
|
||||||
# USE_OT : enable the OpenTracing filter
|
# USE_OT : enable the OpenTracing filter
|
||||||
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
|
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
|
||||||
# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
|
# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
|
||||||
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
|
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
|
||||||
# USE_SHM_OPEN : use shm_open() for features that can make use of shared memory
|
# USE_SHM_OPEN : use shm_open() for the startup-logs
|
||||||
# USE_KTLS : use kTLS.(requires at least Linux 4.17).
|
|
||||||
#
|
#
|
||||||
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
|
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
|
||||||
# "USE_xxx=" (empty string). The list of enabled and disabled options for a
|
# "USE_xxx=" (empty string). The list of enabled and disabled options for a
|
||||||
@ -136,12 +135,7 @@
|
|||||||
# VTEST_PROGRAM : location of the vtest program to run reg-tests.
|
# VTEST_PROGRAM : location of the vtest program to run reg-tests.
|
||||||
# DEBUG_USE_ABORT: use abort() for program termination, see include/haproxy/bug.h for details
|
# DEBUG_USE_ABORT: use abort() for program termination, see include/haproxy/bug.h for details
|
||||||
|
|
||||||
#### Add -Werror when set to non-empty, and make Makefile stop on warnings.
|
|
||||||
#### It must be declared before includes because it's used there.
|
|
||||||
ERR =
|
|
||||||
|
|
||||||
include include/make/verbose.mk
|
include include/make/verbose.mk
|
||||||
include include/make/errors.mk
|
|
||||||
include include/make/compiler.mk
|
include include/make/compiler.mk
|
||||||
include include/make/options.mk
|
include include/make/options.mk
|
||||||
|
|
||||||
@ -165,7 +159,7 @@ TARGET =
|
|||||||
CPU =
|
CPU =
|
||||||
ifneq ($(CPU),)
|
ifneq ($(CPU),)
|
||||||
ifneq ($(CPU),generic)
|
ifneq ($(CPU),generic)
|
||||||
$(call $(complain),the "CPU" variable was forced to "$(CPU)" but is no longer \
|
$(warning Warning: the "CPU" variable was forced to "$(CPU)" but is no longer \
|
||||||
used and will be ignored. For native builds, modern compilers generally \
|
used and will be ignored. For native builds, modern compilers generally \
|
||||||
prefer that the string "-march=native" is passed in CPU_CFLAGS or CFLAGS. \
|
prefer that the string "-march=native" is passed in CPU_CFLAGS or CFLAGS. \
|
||||||
For other CPU-specific options, please read suggestions in the INSTALL file.)
|
For other CPU-specific options, please read suggestions in the INSTALL file.)
|
||||||
@ -175,7 +169,7 @@ endif
|
|||||||
#### No longer used
|
#### No longer used
|
||||||
ARCH =
|
ARCH =
|
||||||
ifneq ($(ARCH),)
|
ifneq ($(ARCH),)
|
||||||
$(call $(complain),the "ARCH" variable was forced to "$(ARCH)" but is no \
|
$(warning Warning: the "ARCH" variable was forced to "$(ARCH)" but is no \
|
||||||
longer used and will be ignored. Please check the INSTALL file for other \
|
longer used and will be ignored. Please check the INSTALL file for other \
|
||||||
options, but usually in order to pass arch-specific options, ARCH_FLAGS, \
|
options, but usually in order to pass arch-specific options, ARCH_FLAGS, \
|
||||||
CFLAGS or LDFLAGS are preferred.)
|
CFLAGS or LDFLAGS are preferred.)
|
||||||
@ -193,7 +187,7 @@ OPT_CFLAGS = -O2
|
|||||||
#### No longer used
|
#### No longer used
|
||||||
DEBUG_CFLAGS =
|
DEBUG_CFLAGS =
|
||||||
ifneq ($(DEBUG_CFLAGS),)
|
ifneq ($(DEBUG_CFLAGS),)
|
||||||
$(call $(complain),DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \
|
$(warning Warning: DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \
|
||||||
longer used and will be ignored. If you have ported this build setting from \
|
longer used and will be ignored. If you have ported this build setting from \
|
||||||
and older version, it is likely that you just want to pass these options \
|
and older version, it is likely that you just want to pass these options \
|
||||||
to the CFLAGS variable. If you are passing some debugging-related options \
|
to the CFLAGS variable. If you are passing some debugging-related options \
|
||||||
@ -201,10 +195,12 @@ $(call $(complain),DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \
|
|||||||
both the compilation and linking stages.)
|
both the compilation and linking stages.)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
#### Add -Werror when set to non-empty
|
||||||
|
ERR =
|
||||||
|
|
||||||
#### May be used to force running a specific set of reg-tests
|
#### May be used to force running a specific set of reg-tests
|
||||||
REG_TEST_FILES =
|
REG_TEST_FILES =
|
||||||
REG_TEST_SCRIPT=./scripts/run-regtests.sh
|
REG_TEST_SCRIPT=./scripts/run-regtests.sh
|
||||||
UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
|
|
||||||
|
|
||||||
#### Standard C definition
|
#### Standard C definition
|
||||||
# Compiler-specific flags that may be used to set the standard behavior we
|
# Compiler-specific flags that may be used to set the standard behavior we
|
||||||
@ -214,8 +210,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
|
|||||||
# undefined behavior to silently produce invalid code. For this reason we have
|
# undefined behavior to silently produce invalid code. For this reason we have
|
||||||
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
|
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
|
||||||
# It is preferable not to change this option in order to avoid breakage.
|
# It is preferable not to change this option in order to avoid breakage.
|
||||||
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \
|
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)
|
||||||
$(call cc-opt,-fvect-cost-model=very-cheap)
|
|
||||||
|
|
||||||
#### Compiler-specific flags to enable certain classes of warnings.
|
#### Compiler-specific flags to enable certain classes of warnings.
|
||||||
# Some are hard-coded, others are enabled only if supported.
|
# Some are hard-coded, others are enabled only if supported.
|
||||||
@ -252,7 +247,7 @@ endif
|
|||||||
#### No longer used
|
#### No longer used
|
||||||
SMALL_OPTS =
|
SMALL_OPTS =
|
||||||
ifneq ($(SMALL_OPTS),)
|
ifneq ($(SMALL_OPTS),)
|
||||||
$(call $(complain),SMALL_OPTS was forced to "$(SMALL_OPTS)" but is no longer \
|
$(warning Warning: SMALL_OPTS was forced to "$(SMALL_OPTS)" but is no longer \
|
||||||
used and will be ignored. Please check if this setting are still relevant, \
|
used and will be ignored. Please check if this setting are still relevant, \
|
||||||
and move it either to DEFINE or to CFLAGS instead.)
|
and move it either to DEFINE or to CFLAGS instead.)
|
||||||
endif
|
endif
|
||||||
@ -265,9 +260,8 @@ endif
|
|||||||
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
|
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
|
||||||
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
|
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
|
||||||
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
|
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
|
||||||
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD=0-2, DEBUG_STRICT, DEBUG_DEV,
|
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
|
||||||
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST,
|
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST.
|
||||||
# DEBUG_COUNTERS=[0-2], DEBUG_STRESS, DEBUG_UNIT.
|
|
||||||
DEBUG =
|
DEBUG =
|
||||||
|
|
||||||
#### Trace options
|
#### Trace options
|
||||||
@ -342,16 +336,14 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
|
|||||||
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
|
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
|
||||||
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
|
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
|
||||||
USE_ECH \
|
|
||||||
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
|
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
|
||||||
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
|
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
|
||||||
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
|
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
|
||||||
USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
|
USE_WURFL USE_SYSTEMD USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
|
||||||
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
|
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
|
||||||
USE_MEMORY_PROFILING USE_SHM_OPEN \
|
USE_MEMORY_PROFILING USE_SHM_OPEN \
|
||||||
USE_STATIC_PCRE USE_STATIC_PCRE2 \
|
USE_STATIC_PCRE USE_STATIC_PCRE2 \
|
||||||
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT \
|
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
|
||||||
USE_QUIC_OPENSSL_COMPAT USE_KTLS
|
|
||||||
|
|
||||||
# preset all variables for all supported build options among use_opts
|
# preset all variables for all supported build options among use_opts
|
||||||
$(reset_opts_vars)
|
$(reset_opts_vars)
|
||||||
@ -382,13 +374,13 @@ ifeq ($(TARGET),haiku)
|
|||||||
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
|
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# For linux >= 4.17 and glibc
|
# For linux >= 2.6.28 and glibc
|
||||||
ifeq ($(TARGET),linux-glibc)
|
ifeq ($(TARGET),linux-glibc)
|
||||||
set_target_defaults = $(call default_opts, \
|
set_target_defaults = $(call default_opts, \
|
||||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
||||||
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
||||||
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
|
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_SYSTEMD)
|
||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
@ -401,13 +393,13 @@ ifeq ($(TARGET),linux-glibc-legacy)
|
|||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# For linux >= 4.17 and musl
|
# For linux >= 2.6.28 and musl
|
||||||
ifeq ($(TARGET),linux-musl)
|
ifeq ($(TARGET),linux-musl)
|
||||||
set_target_defaults = $(call default_opts, \
|
set_target_defaults = $(call default_opts, \
|
||||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
||||||
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
||||||
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
|
USE_GETADDRINFO USE_SHM_OPEN)
|
||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
@ -424,7 +416,7 @@ endif
|
|||||||
ifeq ($(TARGET),freebsd)
|
ifeq ($(TARGET),freebsd)
|
||||||
set_target_defaults = $(call default_opts, \
|
set_target_defaults = $(call default_opts, \
|
||||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
|
USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
|
||||||
USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO USE_PROCCTL)
|
USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO USE_PROCCTL USE_SHM_OPEN)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# kFreeBSD glibc
|
# kFreeBSD glibc
|
||||||
@ -598,16 +590,10 @@ endif
|
|||||||
|
|
||||||
ifneq ($(USE_BACKTRACE:0=),)
|
ifneq ($(USE_BACKTRACE:0=),)
|
||||||
BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
|
BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
|
||||||
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(USE_MEMORY_PROFILING:0=),)
|
|
||||||
MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_CPU_AFFINITY:0=),)
|
ifneq ($(USE_CPU_AFFINITY:0=),)
|
||||||
OPTIONS_OBJS += src/cpuset.o
|
OPTIONS_OBJS += src/cpuset.o
|
||||||
OPTIONS_OBJS += src/cpu_topo.o
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# OpenSSL is packaged in various forms and with various dependencies.
|
# OpenSSL is packaged in various forms and with various dependencies.
|
||||||
@ -640,10 +626,7 @@ ifneq ($(USE_OPENSSL:0=),)
|
|||||||
SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
|
SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
|
||||||
endif
|
endif
|
||||||
USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit)
|
USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit)
|
||||||
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
|
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o src/ssl_utils.o src/jwt.o src/ssl_clienthello.o
|
||||||
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
|
|
||||||
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
|
|
||||||
src/acme_resolvers.o src/ssl_trace.o src/jwe.o
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_ENGINE:0=),)
|
ifneq ($(USE_ENGINE:0=),)
|
||||||
@ -656,21 +639,20 @@ endif
|
|||||||
|
|
||||||
ifneq ($(USE_QUIC:0=),)
|
ifneq ($(USE_QUIC:0=),)
|
||||||
|
|
||||||
OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
|
|
||||||
|
OPTIONS_OBJS += src/quic_rx.o src/mux_quic.o src/h3.o src/quic_tx.o \
|
||||||
src/quic_conn.o src/quic_frame.o src/quic_sock.o \
|
src/quic_conn.o src/quic_frame.o src/quic_sock.o \
|
||||||
src/quic_tls.o src/quic_ssl.o src/proto_quic.o \
|
src/quic_ssl.o src/quic_tls.o src/proto_quic.o \
|
||||||
src/quic_cli.o src/quic_trace.o src/quic_tp.o \
|
src/quic_trace.o src/quic_cli.o src/quic_tp.o \
|
||||||
src/quic_cid.o src/quic_stream.o \
|
src/quic_cid.o src/quic_retransmit.o src/quic_retry.o \
|
||||||
src/quic_retransmit.o src/quic_loss.o \
|
src/quic_loss.o src/quic_cc_cubic.o src/quic_stream.o \
|
||||||
src/hq_interop.o src/quic_cc_cubic.o \
|
src/xprt_quic.o src/quic_ack.o src/hq_interop.o \
|
||||||
src/quic_cc_bbr.o src/quic_retry.o \
|
src/quic_cc_newreno.o src/qmux_http.o \
|
||||||
src/cfgparse-quic.o src/xprt_quic.o src/quic_token.o \
|
src/quic_cc_nocc.o src/qpack-dec.o src/quic_cc.o \
|
||||||
src/quic_ack.o src/qpack-dec.o src/quic_cc_newreno.o \
|
src/cfgparse-quic.o src/qmux_trace.o src/qpack-enc.o \
|
||||||
src/qmux_http.o src/qmux_trace.o src/quic_rules.o \
|
src/qpack-tbl.o src/h3_stats.o src/quic_stats.o \
|
||||||
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
|
src/quic_fctl.o src/cbuf.o src/quic_rules.o \
|
||||||
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
|
src/quic_token.o
|
||||||
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
|
|
||||||
src/quic_enc.o
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
|
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
|
||||||
@ -782,6 +764,10 @@ ifneq ($(USE_WURFL:0=),)
|
|||||||
WURFL_LDFLAGS = $(if $(WURFL_LIB),-L$(WURFL_LIB)) -lwurfl
|
WURFL_LDFLAGS = $(if $(WURFL_LIB),-L$(WURFL_LIB)) -lwurfl
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
ifneq ($(USE_SYSTEMD:0=),)
|
||||||
|
OPTIONS_OBJS += src/systemd.o
|
||||||
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_PCRE:0=)$(USE_STATIC_PCRE:0=)$(USE_PCRE_JIT:0=),)
|
ifneq ($(USE_PCRE:0=)$(USE_STATIC_PCRE:0=)$(USE_PCRE_JIT:0=),)
|
||||||
ifneq ($(USE_PCRE2:0=)$(USE_STATIC_PCRE2:0=)$(USE_PCRE2_JIT:0=),)
|
ifneq ($(USE_PCRE2:0=)$(USE_STATIC_PCRE2:0=)$(USE_PCRE2_JIT:0=),)
|
||||||
$(error cannot compile both PCRE and PCRE2 support)
|
$(error cannot compile both PCRE and PCRE2 support)
|
||||||
@ -951,67 +937,62 @@ all:
|
|||||||
@echo
|
@echo
|
||||||
@exit 1
|
@exit 1
|
||||||
else
|
else
|
||||||
all: dev/flags/flags haproxy $(EXTRA)
|
all: haproxy dev/flags/flags $(EXTRA)
|
||||||
endif # obsolete targets
|
endif # obsolete targets
|
||||||
endif # TARGET
|
endif # TARGET
|
||||||
|
|
||||||
OBJS =
|
OBJS =
|
||||||
HATERM_OBJS =
|
|
||||||
|
|
||||||
ifneq ($(EXTRA_OBJS),)
|
ifneq ($(EXTRA_OBJS),)
|
||||||
OBJS += $(EXTRA_OBJS)
|
OBJS += $(EXTRA_OBJS)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/stream.o \
|
||||||
src/server.o src/stream.o src/tcpcheck.o src/http_ana.o \
|
src/log.o src/server.o src/tcpcheck.o src/http_ana.o \
|
||||||
src/stick_table.o src/tools.o src/mux_spop.o src/sample.o \
|
src/stick_table.o src/tools.o src/sample.o src/flt_spoe.o \
|
||||||
src/activity.o src/cfgparse.o src/peers.o src/cli.o \
|
src/cfgparse.o src/peers.o src/cli.o src/resolvers.o \
|
||||||
src/backend.o src/connection.o src/resolvers.o src/proxy.o \
|
src/connection.o src/backend.o src/cache.o src/http_htx.o \
|
||||||
src/cache.o src/stconn.o src/http_htx.o src/debug.o \
|
src/proxy.o src/stconn.o src/check.o src/haproxy.o \
|
||||||
src/check.o src/stats-html.o src/haproxy.o src/listener.o \
|
src/stats-html.o src/listener.o src/pattern.o src/debug.o \
|
||||||
src/applet.o src/pattern.o src/cfgparse-listen.o \
|
src/cfgparse-listen.o src/http_client.o src/activity.o \
|
||||||
src/flt_spoe.o src/cebis_tree.o src/http_ext.o \
|
src/applet.o src/http_act.o src/http_fetch.o src/http_ext.o \
|
||||||
src/http_act.o src/http_fetch.o src/cebs_tree.o \
|
src/dns.o src/vars.o src/tcp_rules.o src/pool.o src/stats.o \
|
||||||
src/cebib_tree.o src/http_client.o src/dns.o \
|
src/stats-proxy.o src/sink.o src/filters.o src/mux_pt.o \
|
||||||
src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
|
src/event_hdl.o src/server_state.o src/h1_htx.o src/h1.o \
|
||||||
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
|
src/flt_http_comp.o src/task.o src/payload.o src/fcgi-app.o \
|
||||||
src/cfgparse-global.o src/filters.o src/mux_pt.o \
|
src/map.o src/trace.o src/tcp_sample.o src/tcp_act.o \
|
||||||
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
|
src/session.o src/htx.o src/cfgparse-global.o src/mjson.o \
|
||||||
src/ceba_tree.o src/session.o src/payload.o src/htx.o \
|
src/h2.o src/ring.o src/fd.o src/sock.o src/mworker.o \
|
||||||
src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \
|
src/flt_trace.o src/thread.o src/proto_rhttp.o src/acl.o \
|
||||||
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
|
src/http.o src/flt_bwlim.o src/channel.o src/queue.o \
|
||||||
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
|
src/mqtt.o src/proto_tcp.o src/lb_chash.o src/http_rules.o \
|
||||||
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
|
src/errors.o src/extcheck.o src/dns_ring.o src/stats-json.o \
|
||||||
src/ring.o src/flt_bwlim.o src/acl.o src/thread.o src/queue.o \
|
src/http_conv.o src/frontend.o src/proto_sockpair.o \
|
||||||
src/http_rules.o src/http.o src/channel.o src/proto_tcp.o \
|
src/compression.o src/ncbuf.o src/stats-file.o src/raw_sock.o \
|
||||||
src/mqtt.o src/lb_chash.o src/extcheck.o src/dns_ring.o \
|
src/lb_fwrr.o src/action.o src/uri_normalizer.o src/buf.o \
|
||||||
src/errors.o src/ncbuf.o src/compression.o src/http_conv.o \
|
src/proto_uxst.o src/ebmbtree.o src/xprt_handshake.o \
|
||||||
src/frontend.o src/stats-json.o src/proto_sockpair.o \
|
src/protocol.o src/proto_udp.o src/lb_fwlc.o src/sha1.o \
|
||||||
src/raw_sock.o src/action.o src/stats-file.o src/buf.o \
|
src/proto_uxdg.o src/mailers.o src/lb_fas.o src/cfgcond.o \
|
||||||
src/xprt_handshake.o src/proto_uxst.o src/lb_fwrr.o \
|
src/cfgdiag.o src/sock_unix.o src/sock_inet.o \
|
||||||
src/uri_normalizer.o src/mailers.o src/protocol.o \
|
src/mworker-prog.o src/lb_map.o src/ev_select.o src/shctx.o \
|
||||||
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
|
src/hpack-dec.o src/fix.o src/clock.o src/cfgparse-tcp.o \
|
||||||
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
|
src/arg.o src/signal.o src/fcgi.o src/dynbuf.o src/regex.o \
|
||||||
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
|
src/lru.o src/lb_ss.o src/eb64tree.o src/chunk.o \
|
||||||
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \
|
src/cfgparse-unix.o src/guid.o src/ebimtree.o src/eb32tree.o \
|
||||||
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
|
src/eb32sctree.o src/base64.o src/uri_auth.o src/time.o \
|
||||||
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
|
src/hpack-tbl.o src/ebsttree.o src/ebistree.o src/auth.o \
|
||||||
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
|
src/hpack-huff.o src/freq_ctr.o src/dict.o src/wdt.o \
|
||||||
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
|
src/pipe.o src/init.o src/http_acl.o src/hpack-enc.o \
|
||||||
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
|
src/cebu32_tree.o src/cebu64_tree.o src/cebua_tree.o \
|
||||||
src/ebistree.o src/base64.o src/auth.o src/time.o \
|
src/cebub_tree.o src/cebuib_tree.o src/cebuis_tree.o \
|
||||||
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
|
src/cebul_tree.o src/cebus_tree.o \
|
||||||
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
src/ebtree.o src/dgram.o src/hash.o src/version.o \
|
||||||
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
src/limits.o src/mux_spop.o
|
||||||
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
|
|
||||||
src/cfgparse-peers.o src/haterm.o
|
|
||||||
|
|
||||||
ifneq ($(TRACE),)
|
ifneq ($(TRACE),)
|
||||||
OBJS += src/calltrace.o
|
OBJS += src/calltrace.o
|
||||||
endif
|
endif
|
||||||
|
|
||||||
HATERM_OBJS += $(OBJS) src/haterm_init.o
|
|
||||||
|
|
||||||
# Used only for forced dependency checking. May be cleared during development.
|
# Used only for forced dependency checking. May be cleared during development.
|
||||||
INCLUDES = $(wildcard include/*/*.h)
|
INCLUDES = $(wildcard include/*/*.h)
|
||||||
DEP = $(INCLUDES) .build_opts
|
DEP = $(INCLUDES) .build_opts
|
||||||
@ -1041,9 +1022,8 @@ help:
|
|||||||
# TARGET variable is not set since we're not building, by definition.
|
# TARGET variable is not set since we're not building, by definition.
|
||||||
IGNORE_OPTS=help install install-man install-doc install-bin \
|
IGNORE_OPTS=help install install-man install-doc install-bin \
|
||||||
uninstall clean tags cscope tar git-tar version update-version \
|
uninstall clean tags cscope tar git-tar version update-version \
|
||||||
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
|
opts reg-tests reg-tests-help admin/halog/halog dev/flags/flags \
|
||||||
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
|
dev/haring/haring dev/poll/poll dev/tcploop/tcploop
|
||||||
dev/term_events/term_events dev/gdb/pm-from-core dev/gdb/libs-from-core
|
|
||||||
|
|
||||||
ifneq ($(TARGET),)
|
ifneq ($(TARGET),)
|
||||||
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
|
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
|
||||||
@ -1059,9 +1039,6 @@ endif # non-empty target
|
|||||||
haproxy: $(OPTIONS_OBJS) $(OBJS)
|
haproxy: $(OPTIONS_OBJS) $(OBJS)
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
haterm: $(OPTIONS_OBJS) $(HATERM_OBJS)
|
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
|
||||||
|
|
||||||
objsize: haproxy
|
objsize: haproxy
|
||||||
$(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
|
$(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
|
||||||
|
|
||||||
@ -1077,21 +1054,12 @@ admin/dyncookie/dyncookie: admin/dyncookie/dyncookie.o
|
|||||||
dev/flags/flags: dev/flags/flags.o
|
dev/flags/flags: dev/flags/flags.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
dev/gdb/libs-from-core: dev/gdb/libs-from-core.o
|
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
|
||||||
|
|
||||||
dev/gdb/pm-from-core: dev/gdb/pm-from-core.o
|
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
|
||||||
|
|
||||||
dev/haring/haring: dev/haring/haring.o
|
dev/haring/haring: dev/haring/haring.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
dev/hpack/%: dev/hpack/%.o
|
dev/hpack/%: dev/hpack/%.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
dev/ncpu/ncpu:
|
|
||||||
$(cmd_MAKE) -C dev/ncpu ncpu V='$(V)'
|
|
||||||
|
|
||||||
dev/poll/poll:
|
dev/poll/poll:
|
||||||
$(cmd_MAKE) -C dev/poll poll CC='$(CC)' OPTIMIZE='$(COPTS)' V='$(V)'
|
$(cmd_MAKE) -C dev/poll poll CC='$(CC)' OPTIMIZE='$(COPTS)' V='$(V)'
|
||||||
|
|
||||||
@ -1104,16 +1072,13 @@ dev/tcploop/tcploop:
|
|||||||
dev/udp/udp-perturb: dev/udp/udp-perturb.o
|
dev/udp/udp-perturb: dev/udp/udp-perturb.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
dev/term_events/term_events: dev/term_events/term_events.o
|
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
|
||||||
|
|
||||||
# rebuild it every time
|
# rebuild it every time
|
||||||
.PHONY: src/version.c dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop
|
.PHONY: src/version.c dev/poll/poll dev/tcploop/tcploop
|
||||||
|
|
||||||
src/calltrace.o: src/calltrace.c $(DEP)
|
src/calltrace.o: src/calltrace.c $(DEP)
|
||||||
$(cmd_CC) $(TRACE_COPTS) -c -o $@ $<
|
$(cmd_CC) $(TRACE_COPTS) -c -o $@ $<
|
||||||
|
|
||||||
src/version.o: src/version.c $(DEP)
|
src/haproxy.o: src/haproxy.c $(DEP)
|
||||||
$(cmd_CC) $(COPTS) \
|
$(cmd_CC) $(COPTS) \
|
||||||
-DBUILD_TARGET='"$(strip $(TARGET))"' \
|
-DBUILD_TARGET='"$(strip $(TARGET))"' \
|
||||||
-DBUILD_CC='"$(strip $(CC))"' \
|
-DBUILD_CC='"$(strip $(CC))"' \
|
||||||
@ -1136,11 +1101,6 @@ install-doc:
|
|||||||
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
|
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
|
||||||
done
|
done
|
||||||
|
|
||||||
install-admin:
|
|
||||||
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
|
||||||
$(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
|
|
||||||
$(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"
|
|
||||||
|
|
||||||
install-bin:
|
install-bin:
|
||||||
$(Q)for i in haproxy $(EXTRA); do \
|
$(Q)for i in haproxy $(EXTRA); do \
|
||||||
if ! [ -e "$$i" ]; then \
|
if ! [ -e "$$i" ]; then \
|
||||||
@ -1151,7 +1111,7 @@ install-bin:
|
|||||||
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
||||||
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
|
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
|
||||||
|
|
||||||
install: install-bin install-admin install-man install-doc
|
install: install-bin install-man install-doc
|
||||||
|
|
||||||
uninstall:
|
uninstall:
|
||||||
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
|
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
|
||||||
@ -1173,15 +1133,12 @@ clean:
|
|||||||
$(Q)rm -f addons/ot/src/*.[oas]
|
$(Q)rm -f addons/ot/src/*.[oas]
|
||||||
$(Q)rm -f addons/wurfl/*.[oas] addons/wurfl/dummy/*.[oas]
|
$(Q)rm -f addons/wurfl/*.[oas] addons/wurfl/dummy/*.[oas]
|
||||||
$(Q)rm -f admin/*/*.[oas] admin/*/*/*.[oas]
|
$(Q)rm -f admin/*/*.[oas] admin/*/*/*.[oas]
|
||||||
$(Q)rm -f dev/*/*.[oas]
|
|
||||||
$(Q)rm -f dev/flags/flags
|
|
||||||
|
|
||||||
distclean: clean
|
|
||||||
$(Q)rm -f admin/iprange/iprange admin/iprange/ip6range admin/halog/halog
|
$(Q)rm -f admin/iprange/iprange admin/iprange/ip6range admin/halog/halog
|
||||||
$(Q)rm -f admin/dyncookie/dyncookie
|
$(Q)rm -f admin/dyncookie/dyncookie
|
||||||
$(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop
|
$(Q)rm -f dev/*/*.[oas]
|
||||||
|
$(Q)rm -f dev/flags/flags dev/haring/haring dev/poll/poll dev/tcploop/tcploop
|
||||||
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
||||||
$(Q)rm -f dev/qpack/decode dev/gdb/pm-from-core dev/gdb/libs-from-core
|
$(Q)rm -f dev/qpack/decode
|
||||||
|
|
||||||
tags:
|
tags:
|
||||||
$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
|
$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
|
||||||
@ -1299,17 +1256,10 @@ reg-tests-help:
|
|||||||
|
|
||||||
.PHONY: reg-tests reg-tests-help
|
.PHONY: reg-tests reg-tests-help
|
||||||
|
|
||||||
unit-tests:
|
|
||||||
$(Q)$(UNIT_TEST_SCRIPT)
|
|
||||||
.PHONY: unit-tests
|
|
||||||
|
|
||||||
|
|
||||||
# "make range" iteratively builds using "make all" and the exact same build
|
# "make range" iteratively builds using "make all" and the exact same build
|
||||||
# options for all commits within RANGE. RANGE may be either a git range
|
# options for all commits within RANGE. RANGE may be either a git range
|
||||||
# such as ref1..ref2 or a single commit, in which case all commits from
|
# such as ref1..ref2 or a single commit, in which case all commits from
|
||||||
# the master branch to this one will be tested.
|
# the master branch to this one will be tested.
|
||||||
# Will execute TEST_CMD for each commit if defined, and will stop in case of
|
|
||||||
# failure.
|
|
||||||
|
|
||||||
range:
|
range:
|
||||||
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
|
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
|
||||||
@ -1335,8 +1285,6 @@ range:
|
|||||||
echo "[ $$index/$$count ] $$commit #############################"; \
|
echo "[ $$index/$$count ] $$commit #############################"; \
|
||||||
git checkout -q $$commit || die 1; \
|
git checkout -q $$commit || die 1; \
|
||||||
$(MAKE) all || die 1; \
|
$(MAKE) all || die 1; \
|
||||||
set -- $(TEST_CMD); \
|
|
||||||
[ "$$#" -eq 0 ] || "$$@" || die 1; \
|
|
||||||
index=$$((index + 1)); \
|
index=$$((index + 1)); \
|
||||||
done; \
|
done; \
|
||||||
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
|
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
|
||||||
|
[](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
|
||||||
[](https://cirrus-ci.com/github/haproxy/haproxy/)
|
[](https://cirrus-ci.com/github/haproxy/haproxy/)
|
||||||
|
|||||||
@ -5,8 +5,7 @@ CXX := c++
|
|||||||
CXXLIB := -lstdc++
|
CXXLIB := -lstdc++
|
||||||
|
|
||||||
ifeq ($(DEVICEATLAS_SRC),)
|
ifeq ($(DEVICEATLAS_SRC),)
|
||||||
OPTIONS_CFLAGS += -I$(DEVICEATLAS_INC)
|
OPTIONS_LDFLAGS += -lda
|
||||||
OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
|
|
||||||
else
|
else
|
||||||
DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
|
DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
|
||||||
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
|
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
|
||||||
|
|||||||
@ -31,7 +31,6 @@ static struct {
|
|||||||
da_atlas_t atlas;
|
da_atlas_t atlas;
|
||||||
da_evidence_id_t useragentid;
|
da_evidence_id_t useragentid;
|
||||||
da_severity_t loglevel;
|
da_severity_t loglevel;
|
||||||
size_t maxhdrlen;
|
|
||||||
char separator;
|
char separator;
|
||||||
unsigned char daset:1;
|
unsigned char daset:1;
|
||||||
} global_deviceatlas = {
|
} global_deviceatlas = {
|
||||||
@ -43,7 +42,6 @@ static struct {
|
|||||||
.atlasmap = NULL,
|
.atlasmap = NULL,
|
||||||
.atlasfd = -1,
|
.atlasfd = -1,
|
||||||
.useragentid = 0,
|
.useragentid = 0,
|
||||||
.maxhdrlen = 0,
|
|
||||||
.daset = 0,
|
.daset = 0,
|
||||||
.separator = '|',
|
.separator = '|',
|
||||||
};
|
};
|
||||||
@ -59,10 +57,6 @@ static int da_json_file(char **args, int section_type, struct proxy *curpx,
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
global_deviceatlas.jsonpath = strdup(args[1]);
|
global_deviceatlas.jsonpath = strdup(args[1]);
|
||||||
if (unlikely(global_deviceatlas.jsonpath == NULL)) {
|
|
||||||
memprintf(err, "deviceatlas json file : out of memory.\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -79,7 +73,6 @@ static int da_log_level(char **args, int section_type, struct proxy *curpx,
|
|||||||
loglevel = atol(args[1]);
|
loglevel = atol(args[1]);
|
||||||
if (loglevel < 0 || loglevel > 3) {
|
if (loglevel < 0 || loglevel > 3) {
|
||||||
memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
|
memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
|
||||||
return -1;
|
|
||||||
} else {
|
} else {
|
||||||
global_deviceatlas.loglevel = (da_severity_t)loglevel;
|
global_deviceatlas.loglevel = (da_severity_t)loglevel;
|
||||||
}
|
}
|
||||||
@ -108,10 +101,6 @@ static int da_properties_cookie(char **args, int section_type, struct proxy *cur
|
|||||||
return -1;
|
return -1;
|
||||||
} else {
|
} else {
|
||||||
global_deviceatlas.cookiename = strdup(args[1]);
|
global_deviceatlas.cookiename = strdup(args[1]);
|
||||||
if (unlikely(global_deviceatlas.cookiename == NULL)) {
|
|
||||||
memprintf(err, "deviceatlas cookie name : out of memory.\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
||||||
return 0;
|
return 0;
|
||||||
@ -130,7 +119,6 @@ static int da_cache_size(char **args, int section_type, struct proxy *curpx,
|
|||||||
cachesize = atol(args[1]);
|
cachesize = atol(args[1]);
|
||||||
if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
|
if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
|
||||||
memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
|
memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
|
||||||
return -1;
|
|
||||||
} else {
|
} else {
|
||||||
#ifdef APINOCACHE
|
#ifdef APINOCACHE
|
||||||
fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
|
fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
|
||||||
@ -177,7 +165,7 @@ static int init_deviceatlas(void)
|
|||||||
da_status_t status;
|
da_status_t status;
|
||||||
|
|
||||||
jsonp = fopen(global_deviceatlas.jsonpath, "r");
|
jsonp = fopen(global_deviceatlas.jsonpath, "r");
|
||||||
if (unlikely(jsonp == 0)) {
|
if (jsonp == 0) {
|
||||||
ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
|
ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
|
||||||
global_deviceatlas.jsonpath);
|
global_deviceatlas.jsonpath);
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
@ -189,11 +177,9 @@ static int init_deviceatlas(void)
|
|||||||
status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
|
status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
|
||||||
&global_deviceatlas.atlasimgptr, &atlasimglen);
|
&global_deviceatlas.atlasimgptr, &atlasimglen);
|
||||||
fclose(jsonp);
|
fclose(jsonp);
|
||||||
if (unlikely(status != DA_OK)) {
|
if (status != DA_OK) {
|
||||||
ha_alert("deviceatlas : '%s' json file is invalid.\n",
|
ha_alert("deviceatlas : '%s' json file is invalid.\n",
|
||||||
global_deviceatlas.jsonpath);
|
global_deviceatlas.jsonpath);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
|
||||||
da_fini();
|
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@ -201,10 +187,8 @@ static int init_deviceatlas(void)
|
|||||||
status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
|
status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
|
||||||
global_deviceatlas.atlasimgptr, atlasimglen);
|
global_deviceatlas.atlasimgptr, atlasimglen);
|
||||||
|
|
||||||
if (unlikely(status != DA_OK)) {
|
if (status != DA_OK) {
|
||||||
ha_alert("deviceatlas : data could not be compiled.\n");
|
ha_alert("deviceatlas : data could not be compiled.\n");
|
||||||
free(global_deviceatlas.atlasimgptr);
|
|
||||||
da_fini();
|
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@ -213,28 +197,11 @@ static int init_deviceatlas(void)
|
|||||||
|
|
||||||
if (global_deviceatlas.cookiename == 0) {
|
if (global_deviceatlas.cookiename == 0) {
|
||||||
global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
|
global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
|
||||||
if (unlikely(global_deviceatlas.cookiename == NULL)) {
|
|
||||||
ha_alert("deviceatlas : out of memory.\n");
|
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
|
||||||
free(global_deviceatlas.atlasimgptr);
|
|
||||||
da_fini();
|
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
||||||
}
|
}
|
||||||
|
|
||||||
global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
|
global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
|
||||||
"user-agent");
|
"user-agent");
|
||||||
{
|
|
||||||
size_t hi;
|
|
||||||
global_deviceatlas.maxhdrlen = 16;
|
|
||||||
for (hi = 0; hi < global_deviceatlas.atlas.header_evidence_count; hi++) {
|
|
||||||
size_t nl = strlen(global_deviceatlas.atlas.header_priorities[hi].name);
|
|
||||||
if (nl > global_deviceatlas.maxhdrlen)
|
|
||||||
global_deviceatlas.maxhdrlen = nl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
|
if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
|
||||||
global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
|
global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
|
||||||
if (global_deviceatlas.atlasmap == MAP_FAILED) {
|
if (global_deviceatlas.atlasmap == MAP_FAILED) {
|
||||||
@ -264,13 +231,15 @@ static void deinit_deviceatlas(void)
|
|||||||
free(global_deviceatlas.cookiename);
|
free(global_deviceatlas.cookiename);
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
da_atlas_close(&global_deviceatlas.atlas);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
free(global_deviceatlas.atlasimgptr);
|
||||||
da_fini();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (global_deviceatlas.atlasfd != -1) {
|
if (global_deviceatlas.atlasfd != -1) {
|
||||||
munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
|
munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
|
||||||
close(global_deviceatlas.atlasfd);
|
close(global_deviceatlas.atlasfd);
|
||||||
|
shm_unlink(ATLASMAPNM);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
da_fini();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void da_haproxy_checkinst(void)
|
static void da_haproxy_checkinst(void)
|
||||||
@ -289,10 +258,6 @@ static void da_haproxy_checkinst(void)
|
|||||||
da_property_decl_t extraprops[1] = {{NULL, 0}};
|
da_property_decl_t extraprops[1] = {{NULL, 0}};
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
|
HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
|
||||||
if (base[0] == 0) {
|
|
||||||
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
|
strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
|
||||||
jsonp = fopen(atlasp, "r");
|
jsonp = fopen(atlasp, "r");
|
||||||
@ -310,20 +275,10 @@ static void da_haproxy_checkinst(void)
|
|||||||
fclose(jsonp);
|
fclose(jsonp);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
|
if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
|
||||||
inst.config.cache_size = global_deviceatlas.cachesize;
|
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
da_atlas_close(&global_deviceatlas.atlas);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
free(global_deviceatlas.atlasimgptr);
|
||||||
global_deviceatlas.atlasimgptr = cnew;
|
global_deviceatlas.atlasimgptr = cnew;
|
||||||
global_deviceatlas.atlas = inst;
|
global_deviceatlas.atlas = inst;
|
||||||
{
|
|
||||||
size_t hi;
|
|
||||||
global_deviceatlas.maxhdrlen = 16;
|
|
||||||
for (hi = 0; hi < inst.header_evidence_count; hi++) {
|
|
||||||
size_t nl = strlen(inst.header_priorities[hi].name);
|
|
||||||
if (nl > global_deviceatlas.maxhdrlen)
|
|
||||||
global_deviceatlas.maxhdrlen = nl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
base[0] = 0;
|
base[0] = 0;
|
||||||
ha_notice("deviceatlas : new instance, data file date `%s`.\n",
|
ha_notice("deviceatlas : new instance, data file date `%s`.\n",
|
||||||
da_getdatacreationiso8601(&global_deviceatlas.atlas));
|
da_getdatacreationiso8601(&global_deviceatlas.atlas));
|
||||||
@ -331,8 +286,6 @@ static void da_haproxy_checkinst(void)
|
|||||||
ha_alert("deviceatlas : instance update failed.\n");
|
ha_alert("deviceatlas : instance update failed.\n");
|
||||||
free(cnew);
|
free(cnew);
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
free(cnew);
|
|
||||||
}
|
}
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
||||||
@ -344,7 +297,7 @@ static void da_haproxy_checkinst(void)
|
|||||||
static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
|
static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
|
||||||
{
|
{
|
||||||
struct buffer *tmp;
|
struct buffer *tmp;
|
||||||
da_propid_t prop;
|
da_propid_t prop, *pprop;
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
da_type_t proptype;
|
da_type_t proptype;
|
||||||
const char *propname;
|
const char *propname;
|
||||||
@ -364,15 +317,13 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
|||||||
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (unlikely(da_atlas_getproptype(&global_deviceatlas.atlas, prop, &proptype) != DA_OK)) {
|
pprop = ∝
|
||||||
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
da_atlas_getproptype(&global_deviceatlas.atlas, *pprop, &proptype);
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (proptype) {
|
switch (proptype) {
|
||||||
case DA_TYPE_BOOLEAN: {
|
case DA_TYPE_BOOLEAN: {
|
||||||
bool val;
|
bool val;
|
||||||
status = da_getpropboolean(devinfo, prop, &val);
|
status = da_getpropboolean(devinfo, *pprop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%d", val);
|
chunk_appendf(tmp, "%d", val);
|
||||||
}
|
}
|
||||||
@ -381,7 +332,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
|||||||
case DA_TYPE_INTEGER:
|
case DA_TYPE_INTEGER:
|
||||||
case DA_TYPE_NUMBER: {
|
case DA_TYPE_NUMBER: {
|
||||||
long val;
|
long val;
|
||||||
status = da_getpropinteger(devinfo, prop, &val);
|
status = da_getpropinteger(devinfo, *pprop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%ld", val);
|
chunk_appendf(tmp, "%ld", val);
|
||||||
}
|
}
|
||||||
@ -389,7 +340,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
|||||||
}
|
}
|
||||||
case DA_TYPE_STRING: {
|
case DA_TYPE_STRING: {
|
||||||
const char *val;
|
const char *val;
|
||||||
status = da_getpropstring(devinfo, prop, &val);
|
status = da_getpropstring(devinfo, *pprop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%s", val);
|
chunk_appendf(tmp, "%s", val);
|
||||||
}
|
}
|
||||||
@ -420,26 +371,29 @@ static int da_haproxy_conv(const struct arg *args, struct sample *smp, void *pri
|
|||||||
{
|
{
|
||||||
da_deviceinfo_t devinfo;
|
da_deviceinfo_t devinfo;
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
char useragentbuf[1024];
|
const char *useragent;
|
||||||
|
char useragentbuf[1024] = { 0 };
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (unlikely(global_deviceatlas.daset == 0) || smp->data.u.str.data == 0) {
|
if (global_deviceatlas.daset == 0 || smp->data.u.str.data == 0) {
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
da_haproxy_checkinst();
|
da_haproxy_checkinst();
|
||||||
|
|
||||||
i = smp->data.u.str.data > sizeof(useragentbuf) - 1 ? sizeof(useragentbuf) - 1 : smp->data.u.str.data;
|
i = smp->data.u.str.data > sizeof(useragentbuf) ? sizeof(useragentbuf) : smp->data.u.str.data;
|
||||||
memcpy(useragentbuf, smp->data.u.str.area, i);
|
memcpy(useragentbuf, smp->data.u.str.area, i - 1);
|
||||||
useragentbuf[i] = 0;
|
useragentbuf[i - 1] = 0;
|
||||||
|
|
||||||
|
useragent = (const char *)useragentbuf;
|
||||||
|
|
||||||
status = da_search(&global_deviceatlas.atlas, &devinfo,
|
status = da_search(&global_deviceatlas.atlas, &devinfo,
|
||||||
global_deviceatlas.useragentid, useragentbuf, 0);
|
global_deviceatlas.useragentid, useragent, 0);
|
||||||
|
|
||||||
return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
|
return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define DA_MAX_HEADERS 32
|
#define DA_MAX_HEADERS 24
|
||||||
|
|
||||||
static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
|
static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
|
||||||
{
|
{
|
||||||
@ -449,10 +403,10 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
|||||||
struct channel *chn;
|
struct channel *chn;
|
||||||
struct htx *htx;
|
struct htx *htx;
|
||||||
struct htx_blk *blk;
|
struct htx_blk *blk;
|
||||||
char vbuf[DA_MAX_HEADERS][1024];
|
char vbuf[DA_MAX_HEADERS][1024] = {{ 0 }};
|
||||||
int i, nbh = 0;
|
int i, nbh = 0;
|
||||||
|
|
||||||
if (unlikely(global_deviceatlas.daset == 0)) {
|
if (global_deviceatlas.daset == 0) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -460,17 +414,18 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
|||||||
|
|
||||||
chn = (smp->strm ? &smp->strm->req : NULL);
|
chn = (smp->strm ? &smp->strm->req : NULL);
|
||||||
htx = smp_prefetch_htx(smp, chn, NULL, 1);
|
htx = smp_prefetch_htx(smp, chn, NULL, 1);
|
||||||
if (unlikely(!htx))
|
if (!htx)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
i = 0;
|
||||||
for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
|
for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
|
||||||
size_t vlen;
|
size_t vlen;
|
||||||
char *pval;
|
char *pval;
|
||||||
da_evidence_id_t evid;
|
da_evidence_id_t evid;
|
||||||
enum htx_blk_type type;
|
enum htx_blk_type type;
|
||||||
struct ist n, v;
|
struct ist n, v;
|
||||||
char hbuf[64];
|
char hbuf[24] = { 0 };
|
||||||
char tval[1024];
|
char tval[1024] = { 0 };
|
||||||
|
|
||||||
type = htx_get_blk_type(blk);
|
type = htx_get_blk_type(blk);
|
||||||
|
|
||||||
@ -483,18 +438,20 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (n.len > global_deviceatlas.maxhdrlen || n.len >= sizeof(hbuf)) {
|
/* The HTTP headers used by the DeviceAtlas API are not longer */
|
||||||
|
if (n.len >= sizeof(hbuf)) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(hbuf, n.ptr, n.len);
|
memcpy(hbuf, n.ptr, n.len);
|
||||||
hbuf[n.len] = 0;
|
hbuf[n.len] = 0;
|
||||||
|
pval = v.ptr;
|
||||||
|
vlen = v.len;
|
||||||
evid = -1;
|
evid = -1;
|
||||||
i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
|
i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
|
||||||
memcpy(tval, v.ptr, i);
|
memcpy(tval, v.ptr, i);
|
||||||
tval[i] = 0;
|
tval[i] = 0;
|
||||||
pval = tval;
|
pval = tval;
|
||||||
vlen = i;
|
|
||||||
|
|
||||||
if (strcasecmp(hbuf, "Accept-Language") == 0) {
|
if (strcasecmp(hbuf, "Accept-Language") == 0) {
|
||||||
evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
|
evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
|
||||||
@ -512,7 +469,7 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
vlen = pl;
|
vlen -= global_deviceatlas.cookienamelen - 1;
|
||||||
pval = p;
|
pval = p;
|
||||||
evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
|
evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@ -141,11 +141,6 @@ enum {
|
|||||||
DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
|
DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
|
||||||
};
|
};
|
||||||
|
|
||||||
struct header_evidence_entry {
|
|
||||||
const char *name;
|
|
||||||
da_evidence_id_t id;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct da_config {
|
struct da_config {
|
||||||
unsigned int cache_size;
|
unsigned int cache_size;
|
||||||
unsigned int __reserved[15]; /* enough reserved keywords for future use */
|
unsigned int __reserved[15]; /* enough reserved keywords for future use */
|
||||||
@ -217,7 +212,7 @@ da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn
|
|||||||
* da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
|
* da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
|
||||||
* different calls to search.
|
* different calls to search.
|
||||||
* Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
|
* Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
|
||||||
* Are assigned an ID within the context that is not transferable through different search results
|
* Are assigned an ID within the context that is not transferrable through different search results
|
||||||
* within the same atlas.
|
* within the same atlas.
|
||||||
* @param atlas Atlas instance
|
* @param atlas Atlas instance
|
||||||
* @param extra_props properties
|
* @param extra_props properties
|
||||||
|
|||||||
@ -47,12 +47,6 @@ via the OpenTracing API with OpenTracing compatible servers (tracers).
|
|||||||
Currently, tracers that support this API include Datadog, Jaeger, LightStep
|
Currently, tracers that support this API include Datadog, Jaeger, LightStep
|
||||||
and Zipkin.
|
and Zipkin.
|
||||||
|
|
||||||
Note: The OpenTracing filter shouldn't be used for new designs as OpenTracing
|
|
||||||
itself is no longer maintained nor supported by its authors. A
|
|
||||||
replacement filter base on OpenTelemetry is currently under development
|
|
||||||
and is expected to be ready around HAProxy 3.2. As such OpenTracing will
|
|
||||||
be deprecated in 3.3 and removed in 3.5.
|
|
||||||
|
|
||||||
The OT filter was primarily tested with the Jaeger tracer, while configurations
|
The OT filter was primarily tested with the Jaeger tracer, while configurations
|
||||||
for both Datadog and Zipkin tracers were also set in the test directory.
|
for both Datadog and Zipkin tracers were also set in the test directory.
|
||||||
|
|
||||||
|
|||||||
@ -718,7 +718,7 @@ static void flt_ot_check_timeouts(struct stream *s, struct filter *f)
|
|||||||
if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
|
if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
|
||||||
FLT_OT_RETURN();
|
FLT_OT_RETURN();
|
||||||
|
|
||||||
s->pending_events |= STRM_EVT_MSG;
|
s->pending_events |= TASK_WOKEN_MSG;
|
||||||
|
|
||||||
flt_ot_return_void(f, &err);
|
flt_ot_return_void(f, &err);
|
||||||
|
|
||||||
|
|||||||
@ -39,21 +39,14 @@
|
|||||||
*/
|
*/
|
||||||
static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope)
|
static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope)
|
||||||
{
|
{
|
||||||
int i;
|
const struct var *var;
|
||||||
|
|
||||||
if (vars == NULL)
|
if (vars == NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
vars_rdlock(vars);
|
vars_rdlock(vars);
|
||||||
for (i = 0; i < VAR_NAME_ROOTS; i++) {
|
list_for_each_entry(var, &(vars->head), l)
|
||||||
struct ceb_node *node = cebu64_first(&(vars->name_root[i]));
|
|
||||||
|
|
||||||
for ( ; node != NULL; node = cebu64_next(&(vars->name_root[i]), node)) {
|
|
||||||
struct var *var = container_of(node, struct var, node);
|
|
||||||
|
|
||||||
FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
|
FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
|
||||||
}
|
|
||||||
}
|
|
||||||
vars_rdunlock(vars);
|
vars_rdunlock(vars);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -91,18 +91,6 @@ name must be preceded by a minus character ('-'). Here are examples:
|
|||||||
# Only dump frontends, backends and servers status
|
# Only dump frontends, backends and servers status
|
||||||
/metrics?metrics=haproxy_frontend_status,haproxy_backend_status,haproxy_server_status
|
/metrics?metrics=haproxy_frontend_status,haproxy_backend_status,haproxy_server_status
|
||||||
|
|
||||||
* Add section description as label for all metrics
|
|
||||||
|
|
||||||
It is possible to set a description in global and proxy sections, via the
|
|
||||||
"description" directive. The global description is exposed if it is define via
|
|
||||||
the "haproxy_process_description" metric. But the descriptions provided in proxy
|
|
||||||
sections are not dumped. However, it is possible to add it as a label for all
|
|
||||||
metrics of the corresponding section, including the global one. To do so,
|
|
||||||
"desc-labels" parameter must be set:
|
|
||||||
|
|
||||||
/metrics?desc-labels
|
|
||||||
|
|
||||||
/ metrics?scope=frontend&desc-labels
|
|
||||||
|
|
||||||
* Dump extra counters
|
* Dump extra counters
|
||||||
|
|
||||||
@ -205,8 +193,6 @@ listed below. Metrics from extra counters are not listed.
|
|||||||
| haproxy_process_current_tasks |
|
| haproxy_process_current_tasks |
|
||||||
| haproxy_process_current_run_queue |
|
| haproxy_process_current_run_queue |
|
||||||
| haproxy_process_idle_time_percent |
|
| haproxy_process_idle_time_percent |
|
||||||
| haproxy_process_node |
|
|
||||||
| haproxy_process_description |
|
|
||||||
| haproxy_process_stopping |
|
| haproxy_process_stopping |
|
||||||
| haproxy_process_jobs |
|
| haproxy_process_jobs |
|
||||||
| haproxy_process_unstoppable_jobs |
|
| haproxy_process_unstoppable_jobs |
|
||||||
@ -389,9 +375,6 @@ listed below. Metrics from extra counters are not listed.
|
|||||||
| haproxy_server_max_connect_time_seconds |
|
| haproxy_server_max_connect_time_seconds |
|
||||||
| haproxy_server_max_response_time_seconds |
|
| haproxy_server_max_response_time_seconds |
|
||||||
| haproxy_server_max_total_time_seconds |
|
| haproxy_server_max_total_time_seconds |
|
||||||
| haproxy_server_agent_status |
|
|
||||||
| haproxy_server_agent_code |
|
|
||||||
| haproxy_server_agent_duration_seconds |
|
|
||||||
| haproxy_server_internal_errors_total |
|
| haproxy_server_internal_errors_total |
|
||||||
| haproxy_server_unsafe_idle_connections_current |
|
| haproxy_server_unsafe_idle_connections_current |
|
||||||
| haproxy_server_safe_idle_connections_current |
|
| haproxy_server_safe_idle_connections_current |
|
||||||
@ -407,7 +390,6 @@ listed below. Metrics from extra counters are not listed.
|
|||||||
+----------------------------------------------------+
|
+----------------------------------------------------+
|
||||||
| haproxy_sticktable_size |
|
| haproxy_sticktable_size |
|
||||||
| haproxy_sticktable_used |
|
| haproxy_sticktable_used |
|
||||||
| haproxy_sticktable_local_updates |
|
|
||||||
+----------------------------------------------------+
|
+----------------------------------------------------+
|
||||||
|
|
||||||
* Resolvers metrics
|
* Resolvers metrics
|
||||||
|
|||||||
@ -32,11 +32,11 @@
|
|||||||
|
|
||||||
/* Prometheus exporter flags (ctx->flags) */
|
/* Prometheus exporter flags (ctx->flags) */
|
||||||
#define PROMEX_FL_METRIC_HDR 0x00000001
|
#define PROMEX_FL_METRIC_HDR 0x00000001
|
||||||
#define PROMEX_FL_BODYLESS_RESP 0x00000002
|
#define PROMEX_FL_INFO_METRIC 0x00000002
|
||||||
/* unused: 0x00000004 */
|
#define PROMEX_FL_FRONT_METRIC 0x00000004
|
||||||
/* unused: 0x00000008 */
|
#define PROMEX_FL_BACK_METRIC 0x00000008
|
||||||
/* unused: 0x00000010 */
|
#define PROMEX_FL_SRV_METRIC 0x00000010
|
||||||
/* unused: 0x00000020 */
|
#define PROMEX_FL_LI_METRIC 0x00000020
|
||||||
#define PROMEX_FL_MODULE_METRIC 0x00000040
|
#define PROMEX_FL_MODULE_METRIC 0x00000040
|
||||||
#define PROMEX_FL_SCOPE_GLOBAL 0x00000080
|
#define PROMEX_FL_SCOPE_GLOBAL 0x00000080
|
||||||
#define PROMEX_FL_SCOPE_FRONT 0x00000100
|
#define PROMEX_FL_SCOPE_FRONT 0x00000100
|
||||||
@ -47,7 +47,6 @@
|
|||||||
#define PROMEX_FL_NO_MAINT_SRV 0x00002000
|
#define PROMEX_FL_NO_MAINT_SRV 0x00002000
|
||||||
#define PROMEX_FL_EXTRA_COUNTERS 0x00004000
|
#define PROMEX_FL_EXTRA_COUNTERS 0x00004000
|
||||||
#define PROMEX_FL_INC_METRIC_BY_DEFAULT 0x00008000
|
#define PROMEX_FL_INC_METRIC_BY_DEFAULT 0x00008000
|
||||||
#define PROMEX_FL_DESC_LABELS 0x00010000
|
|
||||||
|
|
||||||
#define PROMEX_FL_SCOPE_ALL (PROMEX_FL_SCOPE_GLOBAL | PROMEX_FL_SCOPE_FRONT | \
|
#define PROMEX_FL_SCOPE_ALL (PROMEX_FL_SCOPE_GLOBAL | PROMEX_FL_SCOPE_FRONT | \
|
||||||
PROMEX_FL_SCOPE_LI | PROMEX_FL_SCOPE_BACK | \
|
PROMEX_FL_SCOPE_LI | PROMEX_FL_SCOPE_BACK | \
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -1,235 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Dump certificates from the HAProxy stats or master socket to the filesystem
|
|
||||||
# Experimental script
|
|
||||||
#
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export BASEPATH=${BASEPATH:-/etc/haproxy}/
|
|
||||||
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
|
|
||||||
export DRY_RUN=0
|
|
||||||
export DEBUG=
|
|
||||||
export VERBOSE=
|
|
||||||
export M="@1 "
|
|
||||||
export TMP
|
|
||||||
|
|
||||||
vecho() {
|
|
||||||
|
|
||||||
[ -n "$VERBOSE" ] && echo "$@"
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
read_certificate() {
|
|
||||||
name=$1
|
|
||||||
crt_filename=
|
|
||||||
key_filename=
|
|
||||||
|
|
||||||
OFS=$IFS
|
|
||||||
IFS=":"
|
|
||||||
|
|
||||||
while read -r key value; do
|
|
||||||
case "$key" in
|
|
||||||
"Crt filename")
|
|
||||||
crt_filename="${value# }"
|
|
||||||
key_filename="${value# }"
|
|
||||||
;;
|
|
||||||
"Key filename")
|
|
||||||
key_filename="${value# }"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
|
|
||||||
IFS=$OFS
|
|
||||||
|
|
||||||
if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# handle fields without a crt-base/key-base
|
|
||||||
[ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
|
|
||||||
[ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"
|
|
||||||
|
|
||||||
vecho "name:$name"
|
|
||||||
vecho "crt:$crt_filename"
|
|
||||||
vecho "key:$key_filename"
|
|
||||||
|
|
||||||
export NAME="$name"
|
|
||||||
export CRT_FILENAME="$crt_filename"
|
|
||||||
export KEY_FILENAME="$key_filename"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
cmp_certkey() {
|
|
||||||
prev=$1
|
|
||||||
new=$2
|
|
||||||
|
|
||||||
if [ ! -f "$prev" ]; then
|
|
||||||
return 1;
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
dump_certificate() {
|
|
||||||
name=$1
|
|
||||||
prev_crt=$2
|
|
||||||
prev_key=$3
|
|
||||||
r="tmp.${RANDOM}"
|
|
||||||
d="old.$(date +%s)"
|
|
||||||
new_crt="$TMP/$(basename "$prev_crt").${r}"
|
|
||||||
new_key="$TMP/$(basename "$prev_key").${r}"
|
|
||||||
|
|
||||||
if ! touch "${new_crt}" || ! touch "${new_key}"; then
|
|
||||||
echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
|
|
||||||
# use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
|
|
||||||
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"
|
|
||||||
|
|
||||||
if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
|
|
||||||
echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if cmp_certkey "${prev_crt}" "${new_crt}"; then
|
|
||||||
echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# dry run will just return before trying to move the files
|
|
||||||
if [ "${DRY_RUN}" != "0" ]; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# move the current certificates to ".old.timestamp"
|
|
||||||
if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
|
|
||||||
mv "${prev_crt}" "${prev_crt}.${d}"
|
|
||||||
[ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# move the new certificates to old place
|
|
||||||
mv "${new_crt}" "${prev_crt}"
|
|
||||||
[ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
dump_all_certificates() {
|
|
||||||
echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
|
|
||||||
export NAME
|
|
||||||
export CRT_FILENAME
|
|
||||||
export KEY_FILENAME
|
|
||||||
|
|
||||||
if read_certificate "$line"; then
|
|
||||||
dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
|
|
||||||
else
|
|
||||||
echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
|
|
||||||
fi
|
|
||||||
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
echo "Usage:"
|
|
||||||
echo " $0 [options]* [cert]*"
|
|
||||||
echo ""
|
|
||||||
echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
|
|
||||||
echo " Require socat and openssl"
|
|
||||||
echo " EXPERIMENTAL script, backup your files!"
|
|
||||||
echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
|
|
||||||
echo " -s, --socket <path> Use the stats socket at <path>"
|
|
||||||
echo " -p, --path <path> Specify a base path for relative files (default: ${BASEPATH})"
|
|
||||||
echo " -n, --dry-run Read certificates on the socket but don't dump them"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -v, --verbose Verbose mode"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo " -- End of options"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
|
|
||||||
}
|
|
||||||
|
|
||||||
main() {
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
case "$1" in
|
|
||||||
-S|--master-socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M="@1 "
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-s|--socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M=
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-p|--path)
|
|
||||||
BASEPATH="$2/"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-n|--dry-run)
|
|
||||||
DRY_RUN=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-d|--debug)
|
|
||||||
DEBUG=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-v|--verbose)
|
|
||||||
VERBOSE=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-h|--help)
|
|
||||||
usage "$@"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
--)
|
|
||||||
shift
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
-*)
|
|
||||||
echo "[ALERT] ($$) : Unknown option '$1'" >&2
|
|
||||||
usage "$@"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -n "$DEBUG" ]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
|
|
||||||
TMP=${TMP:-$(mktemp -d)}
|
|
||||||
|
|
||||||
if [ -z "$1" ]; then
|
|
||||||
dump_all_certificates
|
|
||||||
else
|
|
||||||
# compute the certificates names at the end of the command
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
if ! read_certificate "$1"; then
|
|
||||||
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
|
|
||||||
shift
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
trap 'rm -rf -- "$TMP"' EXIT
|
|
||||||
main "$@"
|
|
||||||
@ -1,118 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export VERBOSE=1
|
|
||||||
export TIMEOUT=90
|
|
||||||
export MASTER_SOCKET="${MASTER_SOCKET:-/var/run/haproxy-master.sock}"
|
|
||||||
|
|
||||||
alert() {
|
|
||||||
if [ "$VERBOSE" -ge "1" ]; then
|
|
||||||
echo "[ALERT] $*" >&2
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
reload() {
|
|
||||||
if [ -S "$MASTER_SOCKET" ]; then
|
|
||||||
socat_addr="UNIX-CONNECT:${MASTER_SOCKET}"
|
|
||||||
else
|
|
||||||
case "$MASTER_SOCKET" in
|
|
||||||
*:[0-9]*)
|
|
||||||
socat_addr="TCP:${MASTER_SOCKET}"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
alert "Invalid master socket address '${MASTER_SOCKET}': expected a UNIX socket file or <host>:<port>"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "reload" | socat -t"${TIMEOUT}" "$socat_addr" - | {
|
|
||||||
read -r status || { alert "No status received (connection error or timeout after ${TIMEOUT}s)."; exit 1; }
|
|
||||||
case "$status" in
|
|
||||||
"Success=1") ret=0 ;;
|
|
||||||
"Success=0") ret=1 ;;
|
|
||||||
*) alert "Unexpected response: '$status'"; exit 1 ;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
read -r _ # consume "--"
|
|
||||||
|
|
||||||
if [ "$VERBOSE" -ge 3 ] || { [ "$ret" = 1 ] && [ "$VERBOSE" -ge 2 ]; }; then
|
|
||||||
cat >&2
|
|
||||||
else
|
|
||||||
cat >/dev/null
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit "$ret"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
echo "Usage:"
|
|
||||||
echo " $0 [options]*"
|
|
||||||
echo ""
|
|
||||||
echo " Trigger a reload from the master socket"
|
|
||||||
echo " Require socat"
|
|
||||||
echo " EXPERIMENTAL script!"
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <addr> Unix socket path or <host>:<port> (default: ${MASTER_SOCKET})"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
|
|
||||||
echo " -s, --silent Silent mode (no output)"
|
|
||||||
echo " -v, --verbose Verbose output (output from haproxy on failure)"
|
|
||||||
echo " -vv --verbose=all Very verbose output (output from haproxy on success and failure)"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -S ${MASTER_SOCKET} -d ${TIMEOUT}"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
main() {
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
case "$1" in
|
|
||||||
-S|--master-socket)
|
|
||||||
MASTER_SOCKET="$2"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-t|--timeout)
|
|
||||||
TIMEOUT="$2"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-s|--silent)
|
|
||||||
VERBOSE=0
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-v|--verbose)
|
|
||||||
VERBOSE=2
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-vv|--verbose=all)
|
|
||||||
VERBOSE=3
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-d|--debug)
|
|
||||||
DEBUG=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-h|--help)
|
|
||||||
usage "$@"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "[ALERT] ($$) : Unknown option '$1'" >&2
|
|
||||||
usage "$@"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -n "$DEBUG" ]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
main "$@"
|
|
||||||
reload
|
|
||||||
@ -123,22 +123,6 @@ struct url_stat {
|
|||||||
#define FILT2_PRESERVE_QUERY 0x02
|
#define FILT2_PRESERVE_QUERY 0x02
|
||||||
#define FILT2_EXTRACT_CAPTURE 0x04
|
#define FILT2_EXTRACT_CAPTURE 0x04
|
||||||
|
|
||||||
#define FILT_OUTPUT_FMT (FILT_COUNT_ONLY| \
|
|
||||||
FILT_COUNT_STATUS| \
|
|
||||||
FILT_COUNT_SRV_STATUS| \
|
|
||||||
FILT_COUNT_COOK_CODES| \
|
|
||||||
FILT_COUNT_TERM_CODES| \
|
|
||||||
FILT_COUNT_URL_ONLY| \
|
|
||||||
FILT_COUNT_URL_COUNT| \
|
|
||||||
FILT_COUNT_URL_ERR| \
|
|
||||||
FILT_COUNT_URL_TAVG| \
|
|
||||||
FILT_COUNT_URL_TTOT| \
|
|
||||||
FILT_COUNT_URL_TAVGO| \
|
|
||||||
FILT_COUNT_URL_TTOTO| \
|
|
||||||
FILT_COUNT_URL_BAVG| \
|
|
||||||
FILT_COUNT_URL_BTOT| \
|
|
||||||
FILT_COUNT_IP_COUNT)
|
|
||||||
|
|
||||||
unsigned int filter = 0;
|
unsigned int filter = 0;
|
||||||
unsigned int filter2 = 0;
|
unsigned int filter2 = 0;
|
||||||
unsigned int filter_invert = 0;
|
unsigned int filter_invert = 0;
|
||||||
@ -208,7 +192,7 @@ void help()
|
|||||||
" you can also use -n to start from earlier then field %d\n"
|
" you can also use -n to start from earlier then field %d\n"
|
||||||
" -query preserve the query string for per-URL (-u*) statistics\n"
|
" -query preserve the query string for per-URL (-u*) statistics\n"
|
||||||
"\n"
|
"\n"
|
||||||
"Output format - **only one** may be used at a time\n"
|
"Output format - only one may be used at a time\n"
|
||||||
" -c only report the number of lines that would have been printed\n"
|
" -c only report the number of lines that would have been printed\n"
|
||||||
" -pct output connect and response times percentiles\n"
|
" -pct output connect and response times percentiles\n"
|
||||||
" -st output number of requests per HTTP status code\n"
|
" -st output number of requests per HTTP status code\n"
|
||||||
@ -914,9 +898,6 @@ int main(int argc, char **argv)
|
|||||||
if (!filter && !filter2)
|
if (!filter && !filter2)
|
||||||
die("No action specified.\n");
|
die("No action specified.\n");
|
||||||
|
|
||||||
if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
|
|
||||||
die("Please, set only one output filter.\n");
|
|
||||||
|
|
||||||
if (filter & FILT_ACC_COUNT && !filter_acc_count)
|
if (filter & FILT_ACC_COUNT && !filter_acc_count)
|
||||||
filter_acc_count=1;
|
filter_acc_count=1;
|
||||||
|
|
||||||
@ -1571,10 +1552,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
|
|||||||
if (!srv_node) {
|
if (!srv_node) {
|
||||||
/* server not yet in the tree, let's create it */
|
/* server not yet in the tree, let's create it */
|
||||||
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
||||||
if (unlikely(!srv)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
srv_node = &srv->node;
|
srv_node = &srv->node;
|
||||||
memcpy(&srv_node->key, b, e - b);
|
memcpy(&srv_node->key, b, e - b);
|
||||||
srv_node->key[e - b] = '\0';
|
srv_node->key[e - b] = '\0';
|
||||||
@ -1684,10 +1661,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
|
|||||||
*/
|
*/
|
||||||
if (unlikely(!ustat))
|
if (unlikely(!ustat))
|
||||||
ustat = calloc(1, sizeof(*ustat));
|
ustat = calloc(1, sizeof(*ustat));
|
||||||
if (unlikely(!ustat)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
ustat->nb_err = err;
|
ustat->nb_err = err;
|
||||||
ustat->nb_req = 1;
|
ustat->nb_req = 1;
|
||||||
|
|||||||
@ -6,9 +6,9 @@ Wants=network-online.target
|
|||||||
[Service]
|
[Service]
|
||||||
EnvironmentFile=-/etc/default/haproxy
|
EnvironmentFile=-/etc/default/haproxy
|
||||||
EnvironmentFile=-/etc/sysconfig/haproxy
|
EnvironmentFile=-/etc/sysconfig/haproxy
|
||||||
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
||||||
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
|
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
|
||||||
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
|
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
|
||||||
ExecReload=/bin/kill -USR2 $MAINPID
|
ExecReload=/bin/kill -USR2 $MAINPID
|
||||||
KillMode=mixed
|
KillMode=mixed
|
||||||
Restart=always
|
Restart=always
|
||||||
|
|||||||
@ -195,7 +195,7 @@ while read -r; do
|
|||||||
! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]] || append_flag b.h2c.flg h2c "${BASH_REMATCH[1]}"
|
! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]] || append_flag b.h2c.flg h2c "${BASH_REMATCH[1]}"
|
||||||
elif [ $ctx = cob ]; then
|
elif [ $ctx = cob ]; then
|
||||||
! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]] || append_flag b.co.flg conn "${BASH_REMATCH[1]}"
|
! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]] || append_flag b.co.flg conn "${BASH_REMATCH[1]}"
|
||||||
! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd 0x"${BASH_REMATCH[1]}"
|
! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd "${BASH_REMATCH[1]}"
|
||||||
elif [ $ctx = res ]; then
|
elif [ $ctx = res ]; then
|
||||||
! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]] || append_flag res.flg chn "${BASH_REMATCH[1]}"
|
! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]] || append_flag res.flg chn "${BASH_REMATCH[1]}"
|
||||||
! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]] || append_flag res.ana ana "${BASH_REMATCH[1]}"
|
! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]] || append_flag res.ana ana "${BASH_REMATCH[1]}"
|
||||||
|
|||||||
@ -25,7 +25,7 @@ end
|
|||||||
|
|
||||||
# returns $node filled with the first node of ebroot $arg0
|
# returns $node filled with the first node of ebroot $arg0
|
||||||
define ebtree_first
|
define ebtree_first
|
||||||
# browse ebtree left until encountering leaf
|
# browse ebtree left until encoutering leaf
|
||||||
set $node = (struct eb_node *)$arg0->b[0]
|
set $node = (struct eb_node *)$arg0->b[0]
|
||||||
while 1
|
while 1
|
||||||
_ebtree_set_tag_node $node
|
_ebtree_set_tag_node $node
|
||||||
@ -76,7 +76,7 @@ end
|
|||||||
|
|
||||||
# returns $node filled with the first node of ebroot $arg0
|
# returns $node filled with the first node of ebroot $arg0
|
||||||
define ebsctree_first
|
define ebsctree_first
|
||||||
# browse ebsctree left until encountering leaf
|
# browse ebsctree left until encoutering leaf
|
||||||
set $node = (struct eb32sc_node *)$arg0->b[0]
|
set $node = (struct eb32sc_node *)$arg0->b[0]
|
||||||
while 1
|
while 1
|
||||||
_ebsctree_set_tag_node $node
|
_ebsctree_set_tag_node $node
|
||||||
|
|||||||
@ -1,162 +0,0 @@
|
|||||||
/*
|
|
||||||
* Extracts the libs archives from a core dump
|
|
||||||
*
|
|
||||||
* Copyright (C) 2026 Willy Tarreau <w@1wt.eu>
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
* a copy of this software and associated documentation files (the
|
|
||||||
* "Software"), to deal in the Software without restriction, including
|
|
||||||
* without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
* permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
* the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be
|
|
||||||
* included in all copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
||||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
||||||
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
||||||
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
* OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Note: builds with no option under glibc, and can be built as a minimal
|
|
||||||
* uploadable static executable using nolibc as well:
|
|
||||||
gcc -o libs-from-core -nostdinc -nostdlib -s -Os -static -fno-ident \
|
|
||||||
-fno-exceptions -fno-asynchronous-unwind-tables -fno-unwind-tables \
|
|
||||||
-Wl,--gc-sections,--orphan-handling=discard,-znoseparate-code \
|
|
||||||
-I /path/to/nolibc-sysroot/include libs-from-core.c
|
|
||||||
*/
|
|
||||||
#define _GNU_SOURCE
|
|
||||||
#include <sys/mman.h>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <elf.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
|
|
||||||
void usage(const char *progname)
|
|
||||||
{
|
|
||||||
const char *slash = strrchr(progname, '/');
|
|
||||||
|
|
||||||
if (slash)
|
|
||||||
progname = slash + 1;
|
|
||||||
|
|
||||||
fprintf(stderr,
|
|
||||||
"Usage: %s [-q] <core_file>\n"
|
|
||||||
"Locate a libs archive from an haproxy core dump and dump it to stdout.\n"
|
|
||||||
"Arguments:\n"
|
|
||||||
" -q Query mode: only report offset and length, do not dump\n"
|
|
||||||
" core_file Core dump produced by haproxy\n",
|
|
||||||
progname);
|
|
||||||
}
|
|
||||||
|
|
||||||
int main(int argc, char **argv)
|
|
||||||
{
|
|
||||||
Elf64_Ehdr *ehdr;
|
|
||||||
Elf64_Phdr *phdr;
|
|
||||||
struct stat st;
|
|
||||||
uint8_t *mem;
|
|
||||||
int i, fd;
|
|
||||||
const char *fname;
|
|
||||||
int quiet = 0;
|
|
||||||
int arg;
|
|
||||||
|
|
||||||
for (arg = 1; arg < argc; arg++) {
|
|
||||||
if (*argv[arg] != '-')
|
|
||||||
break;
|
|
||||||
|
|
||||||
if (strcmp(argv[arg], "-q") == 0)
|
|
||||||
quiet = 1;
|
|
||||||
else if (strcmp(argv[arg], "--") == 0) {
|
|
||||||
arg++;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (arg < argc) {
|
|
||||||
fname = argv[arg];
|
|
||||||
} else {
|
|
||||||
usage(argv[0]);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
fd = open(fname, O_RDONLY);
|
|
||||||
|
|
||||||
/* Let's just map the core dump as an ELF header */
|
|
||||||
fstat(fd, &st);
|
|
||||||
mem = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
|
|
||||||
if (mem == MAP_FAILED) {
|
|
||||||
perror("mmap()");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the program headers */
|
|
||||||
ehdr = (Elf64_Ehdr *)mem;
|
|
||||||
|
|
||||||
/* check that it's really a core. Should be "\x7fELF" */
|
|
||||||
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
|
|
||||||
fprintf(stderr, "ELF magic not found.\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
|
|
||||||
fprintf(stderr, "Only 64-bit ELF supported.\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ehdr->e_type != ET_CORE) {
|
|
||||||
fprintf(stderr, "ELF type %d, not a core dump.\n", ehdr->e_type);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* OK we can safely go with program headers */
|
|
||||||
phdr = (Elf64_Phdr *)(mem + ehdr->e_phoff);
|
|
||||||
|
|
||||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
|
||||||
uint64_t size = phdr[i].p_filesz;
|
|
||||||
uint64_t offset = phdr[i].p_offset;
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
if (phdr[i].p_type != PT_LOAD)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
//fprintf(stderr, "Scanning segment %d...\n", ehdr->e_phnum);
|
|
||||||
//fprintf(stderr, "\r%-5d: off=%lx va=%lx sz=%lx ", i, (long)offset, (long)phdr[i].p_vaddr, (long)size);
|
|
||||||
if (!size)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (size < 512) // minimum for a tar header
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* tar magic */
|
|
||||||
if (memcmp(mem + offset + 257, "ustar\0""00", 8) != 0)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* uid, gid */
|
|
||||||
if (memcmp(mem + offset + 108, "0000000\0""0000000\0", 16) != 0)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* link name */
|
|
||||||
if (memcmp(mem + offset + 157, "haproxy-libs-dump\0", 18) != 0)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* OK that's really it */
|
|
||||||
|
|
||||||
if (quiet)
|
|
||||||
printf("offset=%#lx size=%#lx\n", offset, size);
|
|
||||||
else
|
|
||||||
ret = (write(1, mem + offset, size) == size) ? 0 : 1;
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
//fprintf(stderr, "\r%75s\n", "\r");
|
|
||||||
fprintf(stderr, "libs archive not found. Was 'set-dumpable' set to 'libs' ?\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
@ -1,19 +0,0 @@
|
|||||||
# show non-null memprofile entries with method, alloc/free counts/tot and caller
|
|
||||||
|
|
||||||
define memprof_dump
|
|
||||||
set $i = 0
|
|
||||||
set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
|
|
||||||
while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
|
|
||||||
if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
|
|
||||||
set $m = memprof_stats[$i].method
|
|
||||||
printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
|
|
||||||
memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
|
|
||||||
memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
|
|
||||||
output/a memprof_stats[$i].caller
|
|
||||||
printf "\n"
|
|
||||||
end
|
|
||||||
set $i = $i + 1
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
||||||
@ -1,141 +0,0 @@
|
|||||||
/*
|
|
||||||
* Find the post-mortem offset from a core dump
|
|
||||||
*
|
|
||||||
* Copyright (C) 2026 Willy Tarreau <w@1wt.eu>
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
* a copy of this software and associated documentation files (the
|
|
||||||
* "Software"), to deal in the Software without restriction, including
|
|
||||||
* without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
* permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
* the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be
|
|
||||||
* included in all copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
||||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
||||||
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
||||||
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
* OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Note: builds with no option under glibc, and can be built as a minimal
|
|
||||||
* uploadable static executable using nolibc as well:
|
|
||||||
gcc -o pm-from-core -nostdinc -nostdlib -s -Os -static -fno-ident \
|
|
||||||
-fno-exceptions -fno-asynchronous-unwind-tables -fno-unwind-tables \
|
|
||||||
-Wl,--gc-sections,--orphan-handling=discard,-znoseparate-code \
|
|
||||||
-I /path/to/nolibc-sysroot/include pm-from-core.c
|
|
||||||
*/
|
|
||||||
#define _GNU_SOURCE
|
|
||||||
#include <sys/mman.h>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <elf.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#if defined(__GLIBC__)
|
|
||||||
# define my_memmem memmem
|
|
||||||
#else
|
|
||||||
void *my_memmem(const void *haystack, size_t haystacklen,
|
|
||||||
const void *needle, size_t needlelen)
|
|
||||||
{
|
|
||||||
while (haystacklen >= needlelen) {
|
|
||||||
if (!memcmp(haystack, needle, needlelen))
|
|
||||||
return (void*)haystack;
|
|
||||||
haystack++;
|
|
||||||
haystacklen--;
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define MAGIC "POST-MORTEM STARTS HERE+7654321\0"
|
|
||||||
|
|
||||||
int main(int argc, char **argv)
|
|
||||||
{
|
|
||||||
Elf64_Ehdr *ehdr;
|
|
||||||
Elf64_Phdr *phdr;
|
|
||||||
struct stat st;
|
|
||||||
uint8_t *mem;
|
|
||||||
int i, fd;
|
|
||||||
|
|
||||||
if (argc < 2) {
|
|
||||||
printf("Usage: %s <core_file>\n", argv[0]);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
fd = open(argv[1], O_RDONLY);
|
|
||||||
|
|
||||||
/* Let's just map the core dump as an ELF header */
|
|
||||||
fstat(fd, &st);
|
|
||||||
mem = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
|
|
||||||
if (mem == MAP_FAILED) {
|
|
||||||
perror("mmap()");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the program headers */
|
|
||||||
ehdr = (Elf64_Ehdr *)mem;
|
|
||||||
|
|
||||||
/* check that it's really a core. Should be "\x7fELF" */
|
|
||||||
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
|
|
||||||
fprintf(stderr, "ELF magic not found.\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
|
|
||||||
fprintf(stderr, "Only 64-bit ELF supported.\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ehdr->e_type != ET_CORE) {
|
|
||||||
fprintf(stderr, "ELF type %d, not a core dump.\n", ehdr->e_type);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* OK we can safely go with program headers */
|
|
||||||
phdr = (Elf64_Phdr *)(mem + ehdr->e_phoff);
|
|
||||||
|
|
||||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
|
||||||
uint64_t size = phdr[i].p_filesz;
|
|
||||||
uint64_t offset = phdr[i].p_offset;
|
|
||||||
uint64_t vaddr = phdr[i].p_vaddr;
|
|
||||||
uint64_t found_ofs;
|
|
||||||
uint8_t *found;
|
|
||||||
|
|
||||||
if (phdr[i].p_type != PT_LOAD)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
//printf("Scanning segment %d...\n", ehdr->e_phnum);
|
|
||||||
//printf("\r%-5d: off=%lx va=%lx sz=%lx ", i, (long)offset, (long)vaddr, (long)size);
|
|
||||||
if (!size)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (size >= 1048576) // don't scan large segments
|
|
||||||
continue;
|
|
||||||
|
|
||||||
found = my_memmem(mem + offset, size, MAGIC, sizeof(MAGIC) - 1);
|
|
||||||
if (!found)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
found_ofs = found - (mem + offset);
|
|
||||||
|
|
||||||
printf("Found post-mortem magic in segment %d:\n", i);
|
|
||||||
printf(" Core File Offset: 0x%lx (0x%lx + 0x%lx)\n", offset + found_ofs, offset, found_ofs);
|
|
||||||
printf(" Runtime VAddr: 0x%lx (0x%lx + 0x%lx)\n", vaddr + found_ofs, vaddr, found_ofs);
|
|
||||||
printf(" Segment Size: 0x%lx\n", size);
|
|
||||||
printf("\nIn gdb, copy-paste this line:\n\n pm_init 0x%lx\n\n", vaddr + found_ofs);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
//printf("\r%75s\n", "\r");
|
|
||||||
printf("post-mortem magic not found\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
@ -14,8 +14,8 @@ define pools_dump
|
|||||||
set $idx=$idx + 1
|
set $idx=$idx + 1
|
||||||
end
|
end
|
||||||
|
|
||||||
set $mem = (unsigned long)$total * $e->size
|
set $mem = $total * $e->size
|
||||||
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%lu\n", $p, $e, $e->name, $e->size, $total, $used, $mem
|
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%u\n", $p, $e, $e->name, $e->size, $total, $used, $mem
|
||||||
set $p = *(void **)$p
|
set $p = *(void **)$p
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@ -1,247 +0,0 @@
|
|||||||
-- This is an HTTP/2 tracer for a TCP proxy. It will decode the frames that are
|
|
||||||
-- exchanged between the client and the server and indicate their direction,
|
|
||||||
-- types, flags and lengths. Lines are prefixed with a connection number modulo
|
|
||||||
-- 4096 that allows to sort out multiplexed exchanges. In order to use this,
|
|
||||||
-- simply load this file in the global section and use it from a TCP proxy:
|
|
||||||
--
|
|
||||||
-- global
|
|
||||||
-- lua-load "dev/h2/h2-tracer.lua"
|
|
||||||
--
|
|
||||||
-- listen h2_sniffer
|
|
||||||
-- mode tcp
|
|
||||||
-- bind :8002
|
|
||||||
-- filter lua.h2-tracer #hex
|
|
||||||
-- server s1 127.0.0.1:8003
|
|
||||||
--
|
|
||||||
|
|
||||||
-- define the decoder's class here
|
|
||||||
Dec = {}
|
|
||||||
Dec.id = "Lua H2 tracer"
|
|
||||||
Dec.flags = 0
|
|
||||||
Dec.__index = Dec
|
|
||||||
Dec.args = {} -- args passed by the filter's declaration
|
|
||||||
Dec.cid = 0 -- next connection ID
|
|
||||||
|
|
||||||
-- prefix to indent responses
|
|
||||||
res_pfx = " | "
|
|
||||||
|
|
||||||
-- H2 frame types
|
|
||||||
h2ft = {
|
|
||||||
[0] = "DATA",
|
|
||||||
[1] = "HEADERS",
|
|
||||||
[2] = "PRIORITY",
|
|
||||||
[3] = "RST_STREAM",
|
|
||||||
[4] = "SETTINGS",
|
|
||||||
[5] = "PUSH_PROMISE",
|
|
||||||
[6] = "PING",
|
|
||||||
[7] = "GOAWAY",
|
|
||||||
[8] = "WINDOW_UPDATE",
|
|
||||||
[9] = "CONTINUATION",
|
|
||||||
}
|
|
||||||
|
|
||||||
h2ff = {
|
|
||||||
[0] = { [0] = "ES", [3] = "PADDED" }, -- data
|
|
||||||
[1] = { [0] = "ES", [2] = "EH", [3] = "PADDED", [5] = "PRIORITY" }, -- headers
|
|
||||||
[2] = { }, -- priority
|
|
||||||
[3] = { }, -- rst_stream
|
|
||||||
[4] = { [0] = "ACK" }, -- settings
|
|
||||||
[5] = { [2] = "EH", [3] = "PADDED" }, -- push_promise
|
|
||||||
[6] = { [0] = "ACK" }, -- ping
|
|
||||||
[7] = { }, -- goaway
|
|
||||||
[8] = { }, -- window_update
|
|
||||||
[9] = { [2] = "EH" }, -- continuation
|
|
||||||
}
|
|
||||||
|
|
||||||
function Dec:new()
|
|
||||||
local dec = {}
|
|
||||||
|
|
||||||
setmetatable(dec, Dec)
|
|
||||||
dec.do_hex = false
|
|
||||||
if (Dec.args[1] == "hex") then
|
|
||||||
dec.do_hex = true
|
|
||||||
end
|
|
||||||
|
|
||||||
Dec.cid = Dec.cid+1
|
|
||||||
-- mix the thread number when multithreading.
|
|
||||||
dec.cid = Dec.cid + 64 * core.thread
|
|
||||||
|
|
||||||
-- state per dir. [1]=req [2]=res
|
|
||||||
dec.st = {
|
|
||||||
[1] = {
|
|
||||||
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
|
|
||||||
fofs = 0,
|
|
||||||
flen = 0,
|
|
||||||
ftyp = 0,
|
|
||||||
fflg = 0,
|
|
||||||
sid = 0,
|
|
||||||
tot = 0,
|
|
||||||
},
|
|
||||||
[2] = {
|
|
||||||
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
|
|
||||||
fofs = 0,
|
|
||||||
flen = 0,
|
|
||||||
ftyp = 0,
|
|
||||||
fflg = 0,
|
|
||||||
sid = 0,
|
|
||||||
tot = 0,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return dec
|
|
||||||
end
|
|
||||||
|
|
||||||
function Dec:start_analyze(txn, chn)
|
|
||||||
if chn:is_resp() then
|
|
||||||
io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res start\n")
|
|
||||||
else
|
|
||||||
io.write(string.format("[%03x] ", self.cid % 4096) .. "### req start\n")
|
|
||||||
end
|
|
||||||
filter.register_data_filter(self, chn)
|
|
||||||
end
|
|
||||||
|
|
||||||
function Dec:end_analyze(txn, chn)
|
|
||||||
if chn:is_resp() then
|
|
||||||
io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res end: " .. self.st[2].tot .. " bytes total\n")
|
|
||||||
else
|
|
||||||
io.write(string.format("[%03x] ", self.cid % 4096) .. "### req end: " ..self.st[1].tot.. " bytes total\n")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
function Dec:tcp_payload(txn, chn)
|
|
||||||
local data = { }
|
|
||||||
local dofs = 1
|
|
||||||
local pfx = ""
|
|
||||||
local dir = 1
|
|
||||||
local sofs = 0
|
|
||||||
local ft = ""
|
|
||||||
local ff = ""
|
|
||||||
|
|
||||||
if chn:is_resp() then
|
|
||||||
pfx = res_pfx
|
|
||||||
dir = 2
|
|
||||||
end
|
|
||||||
|
|
||||||
pfx = string.format("[%03x] ", self.cid % 4096) .. pfx
|
|
||||||
|
|
||||||
-- stream offset before processing
|
|
||||||
sofs = self.st[dir].tot
|
|
||||||
|
|
||||||
if (chn:input() > 0) then
|
|
||||||
data = chn:data()
|
|
||||||
self.st[dir].tot = self.st[dir].tot + chn:input()
|
|
||||||
end
|
|
||||||
|
|
||||||
if (chn:input() > 0 and self.do_hex ~= false) then
|
|
||||||
io.write("\n" .. pfx .. "Hex:\n")
|
|
||||||
for i = 1, #data do
|
|
||||||
if ((i & 7) == 1) then io.write(pfx) end
|
|
||||||
io.write(string.format("0x%02x ", data:sub(i, i):byte()))
|
|
||||||
if ((i & 7) == 0 or i == #data) then io.write("\n") end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
-- start at byte 1 in the <data> string
|
|
||||||
dofs = 1
|
|
||||||
|
|
||||||
-- the first 24 bytes are expected to be an H2 preface on the request
|
|
||||||
if (dir == 1 and sofs < 24) then
|
|
||||||
-- let's not check it for now
|
|
||||||
local bytes = self.st[dir].tot - sofs
|
|
||||||
if (sofs + self.st[dir].tot >= 24) then
|
|
||||||
-- skip what was missing from the preface
|
|
||||||
dofs = dofs + 24 - sofs
|
|
||||||
sofs = 24
|
|
||||||
io.write(pfx .. "[PREFACE len=24]\n")
|
|
||||||
else
|
|
||||||
-- consume more preface bytes
|
|
||||||
sofs = sofs + self.st[dir].tot
|
|
||||||
return
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
-- parse contents as long as there are pending data
|
|
||||||
|
|
||||||
while true do
|
|
||||||
-- check if we need to consume data from the current frame
|
|
||||||
-- flen is the number of bytes left before the frame's end.
|
|
||||||
if (self.st[dir].flen > 0) then
|
|
||||||
if dofs > #data then return end -- missing data
|
|
||||||
if (#data - dofs + 1 < self.st[dir].flen) then
|
|
||||||
-- insufficient data
|
|
||||||
self.st[dir].flen = self.st[dir].flen - (#data - dofs + 1)
|
|
||||||
io.write(pfx .. string.format("%32s\n", "... -" .. (#data - dofs + 1) .. " = " .. self.st[dir].flen))
|
|
||||||
dofs = #data + 1
|
|
||||||
return
|
|
||||||
else
|
|
||||||
-- enough data to finish
|
|
||||||
if (dofs == 1) then
|
|
||||||
-- only print a partial size if the frame was interrupted
|
|
||||||
io.write(pfx .. string.format("%32s\n", "... -" .. self.st[dir].flen .. " = 0"))
|
|
||||||
end
|
|
||||||
dofs = dofs + self.st[dir].flen
|
|
||||||
self.st[dir].flen = 0
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
-- here, flen = 0, we're at the beginning of a new frame --
|
|
||||||
|
|
||||||
-- read possibly missing header bytes until dec.fofs == 9
|
|
||||||
while self.st[dir].fofs < 9 do
|
|
||||||
if dofs > #data then return end -- missing data
|
|
||||||
self.st[dir].hdr[self.st[dir].fofs + 1] = data:sub(dofs, dofs):byte()
|
|
||||||
dofs = dofs + 1
|
|
||||||
self.st[dir].fofs = self.st[dir].fofs + 1
|
|
||||||
end
|
|
||||||
|
|
||||||
-- we have a full frame header here
|
|
||||||
if (self.do_hex ~= false) then
|
|
||||||
io.write("\n" .. pfx .. string.format("hdr=%02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
|
|
||||||
self.st[dir].hdr[1], self.st[dir].hdr[2], self.st[dir].hdr[3],
|
|
||||||
self.st[dir].hdr[4], self.st[dir].hdr[5], self.st[dir].hdr[6],
|
|
||||||
self.st[dir].hdr[7], self.st[dir].hdr[8], self.st[dir].hdr[9]))
|
|
||||||
end
|
|
||||||
|
|
||||||
-- we have a full frame header, we'll be ready
|
|
||||||
-- for a new frame once the data is gone
|
|
||||||
self.st[dir].flen = self.st[dir].hdr[1] * 65536 +
|
|
||||||
self.st[dir].hdr[2] * 256 +
|
|
||||||
self.st[dir].hdr[3]
|
|
||||||
self.st[dir].ftyp = self.st[dir].hdr[4]
|
|
||||||
self.st[dir].fflg = self.st[dir].hdr[5]
|
|
||||||
self.st[dir].sid = self.st[dir].hdr[6] * 16777216 +
|
|
||||||
self.st[dir].hdr[7] * 65536 +
|
|
||||||
self.st[dir].hdr[8] * 256 +
|
|
||||||
self.st[dir].hdr[9]
|
|
||||||
self.st[dir].fofs = 0
|
|
||||||
|
|
||||||
-- decode frame type
|
|
||||||
if self.st[dir].ftyp <= 9 then
|
|
||||||
ft = h2ft[self.st[dir].ftyp]
|
|
||||||
else
|
|
||||||
ft = string.format("TYPE_0x%02x\n", self.st[dir].ftyp)
|
|
||||||
end
|
|
||||||
|
|
||||||
-- decode frame flags for frame type <ftyp>
|
|
||||||
ff = ""
|
|
||||||
for i = 7, 0, -1 do
|
|
||||||
if (((self.st[dir].fflg >> i) & 1) ~= 0) then
|
|
||||||
if self.st[dir].ftyp <= 9 and h2ff[self.st[dir].ftyp][i] ~= nil then
|
|
||||||
ff = ff .. ((ff == "") and "" or "+")
|
|
||||||
ff = ff .. h2ff[self.st[dir].ftyp][i]
|
|
||||||
else
|
|
||||||
ff = ff .. ((ff == "") and "" or "+")
|
|
||||||
ff = ff .. string.format("0x%02x", 1<<i)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
io.write(pfx .. string.format("[%s %ssid=%u len=%u (bytes=%u)]\n",
|
|
||||||
ft, (ff == "") and "" or ff .. " ",
|
|
||||||
self.st[dir].sid, self.st[dir].flen,
|
|
||||||
(#data - dofs + 1)))
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
core.register_filter("h2-tracer", Dec, function(dec, args)
|
|
||||||
Dec.args = args
|
|
||||||
return dec
|
|
||||||
end)
|
|
||||||
@ -59,9 +59,9 @@ struct ring_v2 {
|
|||||||
struct ring_v2a {
|
struct ring_v2a {
|
||||||
size_t size; // storage size
|
size_t size; // storage size
|
||||||
size_t rsvd; // header length (used for file-backed maps)
|
size_t rsvd; // header length (used for file-backed maps)
|
||||||
size_t tail ALIGNED(64); // storage tail
|
size_t tail __attribute__((aligned(64))); // storage tail
|
||||||
size_t head ALIGNED(64); // storage head
|
size_t head __attribute__((aligned(64))); // storage head
|
||||||
char area[0] ALIGNED(64); // storage area begins immediately here
|
char area[0] __attribute__((aligned(64))); // storage area begins immediately here
|
||||||
};
|
};
|
||||||
|
|
||||||
/* display the message and exit with the code */
|
/* display the message and exit with the code */
|
||||||
|
|||||||
@ -1,31 +0,0 @@
|
|||||||
include ../../include/make/verbose.mk
|
|
||||||
|
|
||||||
CC = cc
|
|
||||||
OPTIMIZE = -O2 -g
|
|
||||||
DEFINE =
|
|
||||||
INCLUDE =
|
|
||||||
OBJS = ncpu.so ncpu
|
|
||||||
OBJDUMP = objdump
|
|
||||||
|
|
||||||
all: $(OBJS)
|
|
||||||
|
|
||||||
%.o: %.c
|
|
||||||
$(cmd_CC) $(OPTIMIZE) $(DEFINE) $(INCLUDE) -shared -fPIC -c -o $@ $^
|
|
||||||
|
|
||||||
%.so: %.o
|
|
||||||
$(cmd_CC) -pie -o $@ $^
|
|
||||||
$(Q)rm -f $^
|
|
||||||
|
|
||||||
%: %.so
|
|
||||||
$(call qinfo, PATCHING)set -- $$($(OBJDUMP) -j .dynamic -h $^ | fgrep .dynamic); \
|
|
||||||
ofs=$$6; size=$$3; \
|
|
||||||
dd status=none bs=1 count=$$((0x$$ofs)) if=$^ of=$^-p1; \
|
|
||||||
dd status=none bs=1 skip=$$((0x$$ofs)) count=$$((0x$$size)) if=$^ of=$^-p2; \
|
|
||||||
dd status=none bs=1 skip=$$((0x$$ofs+0x$$size)) if=$^ of=$^-p3; \
|
|
||||||
sed -e 's,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x08,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x00,g' < $^-p2 > $^-p2-patched; \
|
|
||||||
cat $^-p1 $^-p2-patched $^-p3 > "$@"
|
|
||||||
$(Q)rm -f $^-p*
|
|
||||||
$(Q)chmod 755 "$@"
|
|
||||||
|
|
||||||
clean:
|
|
||||||
rm -f $(OBJS) *.[oas] *.so-* *~
|
|
||||||
136
dev/ncpu/ncpu.c
136
dev/ncpu/ncpu.c
@ -1,136 +0,0 @@
|
|||||||
#define _GNU_SOURCE
|
|
||||||
#include <errno.h>
|
|
||||||
#include <limits.h>
|
|
||||||
#include <sched.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
|
|
||||||
// gcc -fPIC -shared -O2 -o ncpu{.so,.c}
|
|
||||||
// NCPU=16 LD_PRELOAD=$PWD/ncpu.so command args...
|
|
||||||
|
|
||||||
static char prog_full_path[PATH_MAX];
|
|
||||||
|
|
||||||
long sysconf(int name)
|
|
||||||
{
|
|
||||||
if (name == _SC_NPROCESSORS_ONLN ||
|
|
||||||
name == _SC_NPROCESSORS_CONF) {
|
|
||||||
const char *ncpu = getenv("NCPU");
|
|
||||||
int n;
|
|
||||||
|
|
||||||
n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
|
|
||||||
if (n < 0 || n > CPU_SETSIZE)
|
|
||||||
n = CPU_SETSIZE;
|
|
||||||
return n;
|
|
||||||
}
|
|
||||||
errno = EINVAL;
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* return a cpu_set having the first $NCPU set */
|
|
||||||
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask)
|
|
||||||
{
|
|
||||||
const char *ncpu;
|
|
||||||
int i, n;
|
|
||||||
|
|
||||||
CPU_ZERO_S(cpusetsize, mask);
|
|
||||||
|
|
||||||
ncpu = getenv("NCPU");
|
|
||||||
n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
|
|
||||||
if (n < 0 || n > CPU_SETSIZE)
|
|
||||||
n = CPU_SETSIZE;
|
|
||||||
|
|
||||||
for (i = 0; i < n; i++)
|
|
||||||
CPU_SET_S(i, cpusetsize, mask);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* silently ignore the operation */
|
|
||||||
int sched_setaffinity(pid_t pid, size_t cpusetsize, const cpu_set_t *mask)
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void usage(const char *argv0)
|
|
||||||
{
|
|
||||||
fprintf(stderr,
|
|
||||||
"Usage: %s [-n ncpu] [cmd [args...]]\n"
|
|
||||||
" Will install itself in LD_PRELOAD before calling <cmd> with args.\n"
|
|
||||||
" The number of CPUs may also come from variable NCPU or default to %d.\n"
|
|
||||||
"\n"
|
|
||||||
"",
|
|
||||||
argv0, CPU_SETSIZE);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Called in wrapper mode, no longer supported on recent glibc */
|
|
||||||
int main(int argc, char **argv)
|
|
||||||
{
|
|
||||||
const char *argv0 = argv[0];
|
|
||||||
char *preload;
|
|
||||||
int plen;
|
|
||||||
|
|
||||||
prog_full_path[0] = 0;
|
|
||||||
plen = readlink("/proc/self/exe", prog_full_path, sizeof(prog_full_path) - 1);
|
|
||||||
if (plen != -1)
|
|
||||||
prog_full_path[plen] = 0;
|
|
||||||
else
|
|
||||||
plen = snprintf(prog_full_path, sizeof(prog_full_path), "%s", argv[0]);
|
|
||||||
|
|
||||||
while (1) {
|
|
||||||
argc--;
|
|
||||||
argv++;
|
|
||||||
|
|
||||||
if (argc < 1)
|
|
||||||
usage(argv0);
|
|
||||||
|
|
||||||
if (strcmp(argv[0], "--") == 0) {
|
|
||||||
argc--;
|
|
||||||
argv++;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
else if (strcmp(argv[0], "-n") == 0) {
|
|
||||||
if (argc < 2)
|
|
||||||
usage(argv0);
|
|
||||||
|
|
||||||
if (setenv("NCPU", argv[1], 1) != 0)
|
|
||||||
usage(argv0);
|
|
||||||
argc--;
|
|
||||||
argv++;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
/* unknown arg, that's the command */
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* here the only args left start with the cmd name */
|
|
||||||
|
|
||||||
/* now we'll concatenate ourselves at the end of the LD_PRELOAD variable */
|
|
||||||
preload = getenv("LD_PRELOAD");
|
|
||||||
if (preload) {
|
|
||||||
int olen = strlen(preload);
|
|
||||||
preload = realloc(preload, olen + 1 + plen + 1);
|
|
||||||
if (!preload) {
|
|
||||||
perror("realloc");
|
|
||||||
exit(2);
|
|
||||||
}
|
|
||||||
preload[olen] = ' ';
|
|
||||||
memcpy(preload + olen + 1, prog_full_path, plen);
|
|
||||||
preload[olen + 1 + plen] = 0;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
preload = prog_full_path;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (setenv("LD_PRELOAD", preload, 1) < 0) {
|
|
||||||
perror("setenv");
|
|
||||||
exit(2);
|
|
||||||
}
|
|
||||||
|
|
||||||
execvp(*argv, argv);
|
|
||||||
perror("execve");
|
|
||||||
exit(2);
|
|
||||||
}
|
|
||||||
@ -86,7 +86,7 @@ maintenance model and what the user wants is passed, then the LLM is invited to
|
|||||||
provide its opinion on the need for a backport and an explanation of the reason
|
provide its opinion on the need for a backport and an explanation of the reason
|
||||||
for its choice. This often helps the user to find a quick summary about the
|
for its choice. This often helps the user to find a quick summary about the
|
||||||
patch. All these outputs are then converted to a long HTML page with colors and
|
patch. All these outputs are then converted to a long HTML page with colors and
|
||||||
radio buttons, where patches are preselected based on this classification,
|
radio buttons, where patches are pre-selected based on this classification,
|
||||||
that the user can consult and adjust, read the commits if needed, and the
|
that the user can consult and adjust, read the commits if needed, and the
|
||||||
selected patches finally provide some copy-pastable commands in a text-area to
|
selected patches finally provide some copy-pastable commands in a text-area to
|
||||||
select commit IDs to work on, typically in a form that's suitable for a simple
|
select commit IDs to work on, typically in a form that's suitable for a simple
|
||||||
|
|||||||
@ -1,70 +0,0 @@
|
|||||||
BEGININPUT
|
|
||||||
BEGINCONTEXT
|
|
||||||
|
|
||||||
HAProxy's development cycle consists in one development branch, and multiple
|
|
||||||
maintenance branches.
|
|
||||||
|
|
||||||
All the development is made into the development branch exclusively. This
|
|
||||||
includes mostly new features, doc updates, cleanups and or course, fixes.
|
|
||||||
|
|
||||||
The maintenance branches, also called stable branches, never see any
|
|
||||||
development, and only receive ultra-safe fixes for bugs that affect them,
|
|
||||||
that are picked from the development branch.
|
|
||||||
|
|
||||||
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
|
|
||||||
release, the development branch enters maintenance and a new development branch
|
|
||||||
is created with a new, higher version. The current development branch is
|
|
||||||
3.2-dev, and maintenance branches are 3.1 and below.
|
|
||||||
|
|
||||||
Fixes created in the development branch for issues that were introduced in an
|
|
||||||
earlier branch are applied in descending order to each and every version till
|
|
||||||
that branch that introduced the issue: 3.1 first, then 3.0, then 2.9, then 2.8
|
|
||||||
and so on. This operation is called "backporting". A fix for an issue is never
|
|
||||||
backported beyond the branch that introduced the issue. An important point is
|
|
||||||
that the project maintainers really aim at zero regression in maintenance
|
|
||||||
branches, so they're never willing to take any risk backporting patches that
|
|
||||||
are not deemed strictly necessary.
|
|
||||||
|
|
||||||
Fixes consist of patches managed using the Git version control tool and are
|
|
||||||
identified by a Git commit ID and a commit message. For this reason we
|
|
||||||
indistinctly talk about backporting fixes, commits, or patches; all mean the
|
|
||||||
same thing. When mentioning commit IDs, developers always use a short form
|
|
||||||
made of the first 8 characters only, and expect the AI assistant to do the
|
|
||||||
same.
|
|
||||||
|
|
||||||
It seldom happens that some fixes depend on changes that were brought by other
|
|
||||||
patches that were not in some branches and that will need to be backported as
|
|
||||||
well for the fix to work. In this case, such information is explicitly provided
|
|
||||||
in the commit message by the patch's author in natural language.
|
|
||||||
|
|
||||||
Developers are serious and always indicate if a patch needs to be backported.
|
|
||||||
Sometimes they omit the exact target branch, or they will say that the patch is
|
|
||||||
"needed" in some older branch, but it means the same. If a commit message
|
|
||||||
doesn't mention any backport instructions, it means that the commit does not
|
|
||||||
have to be backported. And patches that are not strictly bug fixes nor doc
|
|
||||||
improvements are normally not backported. For example, fixes for design
|
|
||||||
limitations, architectural improvements and performance optimizations are
|
|
||||||
considered too risky for a backport. Finally, all bug fixes are tagged as
|
|
||||||
"BUG" at the beginning of their subject line. Patches that are not tagged as
|
|
||||||
such are not bugs, and must never be backported unless their commit message
|
|
||||||
explicitly requests so.
|
|
||||||
|
|
||||||
ENDCONTEXT
|
|
||||||
|
|
||||||
A developer is reviewing the development branch, trying to spot which commits
|
|
||||||
need to be backported to maintenance branches. This person is already expert
|
|
||||||
on HAProxy and everything related to Git, patch management, and the risks
|
|
||||||
associated with backports, so he doesn't want to be told how to proceed nor to
|
|
||||||
review the contents of the patch.
|
|
||||||
|
|
||||||
The goal for this developer is to get some help from the AI assistant to save
|
|
||||||
some precious time on this tedious review work. In order to do a better job, he
|
|
||||||
needs an accurate summary of the information and instructions found in each
|
|
||||||
commit message. Specifically he needs to figure if the patch fixes a problem
|
|
||||||
affecting an older branch or not, if it needs to be backported, if so to which
|
|
||||||
branches, and if other patches need to be backported along with it.
|
|
||||||
|
|
||||||
The indented text block below after an "id" line and starting with a Subject line
|
|
||||||
is a commit message from the HAProxy development branch that describes a patch
|
|
||||||
applied to that branch, starting with its subject line, please read it carefully.
|
|
||||||
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
|
|
||||||
ENDINPUT
|
|
||||||
BEGININSTRUCTION
|
|
||||||
|
|
||||||
You are an AI assistant that follows instruction extremely well. Help as much
|
|
||||||
as you can, responding to a single question using a single response.
|
|
||||||
|
|
||||||
The developer wants to know if he needs to backport the patch above to fix
|
|
||||||
maintenance branches, for which branches, and what possible dependencies might
|
|
||||||
be mentioned in the commit message. Carefully study the commit message and its
|
|
||||||
backporting instructions if any (otherwise it should probably not be backported),
|
|
||||||
then provide a very concise and short summary that will help the developer decide
|
|
||||||
to backport it, or simply to skip it.
|
|
||||||
|
|
||||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
|
||||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
|
||||||
where X is a single word among:
|
|
||||||
- "yes", if you recommend to backport the patch right now either because
|
|
||||||
it explicitly states this or because it's a fix for a bug that affects
|
|
||||||
a maintenance branch (3.1 or lower);
|
|
||||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
|
||||||
only after waiting some time.
|
|
||||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
|
||||||
lack of explicit backport instructions, or it's just an improvement);
|
|
||||||
- "uncertain" otherwise for cases not covered above
|
|
||||||
|
|
||||||
ENDINSTRUCTION
|
|
||||||
|
|
||||||
Explanation:
|
|
||||||
@ -1,70 +0,0 @@
|
|||||||
BEGININPUT
|
|
||||||
BEGINCONTEXT
|
|
||||||
|
|
||||||
HAProxy's development cycle consists in one development branch, and multiple
|
|
||||||
maintenance branches.
|
|
||||||
|
|
||||||
All the development is made into the development branch exclusively. This
|
|
||||||
includes mostly new features, doc updates, cleanups and or course, fixes.
|
|
||||||
|
|
||||||
The maintenance branches, also called stable branches, never see any
|
|
||||||
development, and only receive ultra-safe fixes for bugs that affect them,
|
|
||||||
that are picked from the development branch.
|
|
||||||
|
|
||||||
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
|
|
||||||
release, the development branch enters maintenance and a new development branch
|
|
||||||
is created with a new, higher version. The current development branch is
|
|
||||||
3.3-dev, and maintenance branches are 3.2 and below.
|
|
||||||
|
|
||||||
Fixes created in the development branch for issues that were introduced in an
|
|
||||||
earlier branch are applied in descending order to each and every version till
|
|
||||||
that branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
|
|
||||||
and so on. This operation is called "backporting". A fix for an issue is never
|
|
||||||
backported beyond the branch that introduced the issue. An important point is
|
|
||||||
that the project maintainers really aim at zero regression in maintenance
|
|
||||||
branches, so they're never willing to take any risk backporting patches that
|
|
||||||
are not deemed strictly necessary.
|
|
||||||
|
|
||||||
Fixes consist of patches managed using the Git version control tool and are
|
|
||||||
identified by a Git commit ID and a commit message. For this reason we
|
|
||||||
indistinctly talk about backporting fixes, commits, or patches; all mean the
|
|
||||||
same thing. When mentioning commit IDs, developers always use a short form
|
|
||||||
made of the first 8 characters only, and expect the AI assistant to do the
|
|
||||||
same.
|
|
||||||
|
|
||||||
It seldom happens that some fixes depend on changes that were brought by other
|
|
||||||
patches that were not in some branches and that will need to be backported as
|
|
||||||
well for the fix to work. In this case, such information is explicitly provided
|
|
||||||
in the commit message by the patch's author in natural language.
|
|
||||||
|
|
||||||
Developers are serious and always indicate if a patch needs to be backported.
|
|
||||||
Sometimes they omit the exact target branch, or they will say that the patch is
|
|
||||||
"needed" in some older branch, but it means the same. If a commit message
|
|
||||||
doesn't mention any backport instructions, it means that the commit does not
|
|
||||||
have to be backported. And patches that are not strictly bug fixes nor doc
|
|
||||||
improvements are normally not backported. For example, fixes for design
|
|
||||||
limitations, architectural improvements and performance optimizations are
|
|
||||||
considered too risky for a backport. Finally, all bug fixes are tagged as
|
|
||||||
"BUG" at the beginning of their subject line. Patches that are not tagged as
|
|
||||||
such are not bugs, and must never be backported unless their commit message
|
|
||||||
explicitly requests so.
|
|
||||||
|
|
||||||
ENDCONTEXT
|
|
||||||
|
|
||||||
A developer is reviewing the development branch, trying to spot which commits
|
|
||||||
need to be backported to maintenance branches. This person is already expert
|
|
||||||
on HAProxy and everything related to Git, patch management, and the risks
|
|
||||||
associated with backports, so he doesn't want to be told how to proceed nor to
|
|
||||||
review the contents of the patch.
|
|
||||||
|
|
||||||
The goal for this developer is to get some help from the AI assistant to save
|
|
||||||
some precious time on this tedious review work. In order to do a better job, he
|
|
||||||
needs an accurate summary of the information and instructions found in each
|
|
||||||
commit message. Specifically he needs to figure if the patch fixes a problem
|
|
||||||
affecting an older branch or not, if it needs to be backported, if so to which
|
|
||||||
branches, and if other patches need to be backported along with it.
|
|
||||||
|
|
||||||
The indented text block below after an "id" line and starting with a Subject line
|
|
||||||
is a commit message from the HAProxy development branch that describes a patch
|
|
||||||
applied to that branch, starting with its subject line, please read it carefully.
|
|
||||||
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
|
|
||||||
ENDINPUT
|
|
||||||
BEGININSTRUCTION
|
|
||||||
|
|
||||||
You are an AI assistant that follows instruction extremely well. Help as much
|
|
||||||
as you can, responding to a single question using a single response.
|
|
||||||
|
|
||||||
The developer wants to know if he needs to backport the patch above to fix
|
|
||||||
maintenance branches, for which branches, and what possible dependencies might
|
|
||||||
be mentioned in the commit message. Carefully study the commit message and its
|
|
||||||
backporting instructions if any (otherwise it should probably not be backported),
|
|
||||||
then provide a very concise and short summary that will help the developer decide
|
|
||||||
to backport it, or simply to skip it.
|
|
||||||
|
|
||||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
|
||||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
|
||||||
where X is a single word among:
|
|
||||||
- "yes", if you recommend to backport the patch right now either because
|
|
||||||
it explicitly states this or because it's a fix for a bug that affects
|
|
||||||
a maintenance branch (3.2 or lower);
|
|
||||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
|
||||||
only after waiting some time.
|
|
||||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
|
||||||
lack of explicit backport instructions, or it's just an improvement);
|
|
||||||
- "uncertain" otherwise for cases not covered above
|
|
||||||
|
|
||||||
ENDINSTRUCTION
|
|
||||||
|
|
||||||
Explanation:
|
|
||||||
@ -1,70 +0,0 @@
|
|||||||
BEGININPUT
|
|
||||||
BEGINCONTEXT
|
|
||||||
|
|
||||||
HAProxy's development cycle consists in one development branch, and multiple
|
|
||||||
maintenance branches.
|
|
||||||
|
|
||||||
All the development is made into the development branch exclusively. This
|
|
||||||
includes mostly new features, doc updates, cleanups and or course, fixes.
|
|
||||||
|
|
||||||
The maintenance branches, also called stable branches, never see any
|
|
||||||
development, and only receive ultra-safe fixes for bugs that affect them,
|
|
||||||
that are picked from the development branch.
|
|
||||||
|
|
||||||
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
|
|
||||||
release, the development branch enters maintenance and a new development branch
|
|
||||||
is created with a new, higher version. The current development branch is
|
|
||||||
3.4-dev, and maintenance branches are 3.3 and below.
|
|
||||||
|
|
||||||
Fixes created in the development branch for issues that were introduced in an
|
|
||||||
earlier branch are applied in descending order to each and every version till
|
|
||||||
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
|
|
||||||
and so on. This operation is called "backporting". A fix for an issue is never
|
|
||||||
backported beyond the branch that introduced the issue. An important point is
|
|
||||||
that the project maintainers really aim at zero regression in maintenance
|
|
||||||
branches, so they're never willing to take any risk backporting patches that
|
|
||||||
are not deemed strictly necessary.
|
|
||||||
|
|
||||||
Fixes consist of patches managed using the Git version control tool and are
|
|
||||||
identified by a Git commit ID and a commit message. For this reason we
|
|
||||||
indistinctly talk about backporting fixes, commits, or patches; all mean the
|
|
||||||
same thing. When mentioning commit IDs, developers always use a short form
|
|
||||||
made of the first 8 characters only, and expect the AI assistant to do the
|
|
||||||
same.
|
|
||||||
|
|
||||||
It seldom happens that some fixes depend on changes that were brought by other
|
|
||||||
patches that were not in some branches and that will need to be backported as
|
|
||||||
well for the fix to work. In this case, such information is explicitly provided
|
|
||||||
in the commit message by the patch's author in natural language.
|
|
||||||
|
|
||||||
Developers are serious and always indicate if a patch needs to be backported.
|
|
||||||
Sometimes they omit the exact target branch, or they will say that the patch is
|
|
||||||
"needed" in some older branch, but it means the same. If a commit message
|
|
||||||
doesn't mention any backport instructions, it means that the commit does not
|
|
||||||
have to be backported. And patches that are not strictly bug fixes nor doc
|
|
||||||
improvements are normally not backported. For example, fixes for design
|
|
||||||
limitations, architectural improvements and performance optimizations are
|
|
||||||
considered too risky for a backport. Finally, all bug fixes are tagged as
|
|
||||||
"BUG" at the beginning of their subject line. Patches that are not tagged as
|
|
||||||
such are not bugs, and must never be backported unless their commit message
|
|
||||||
explicitly requests so.
|
|
||||||
|
|
||||||
ENDCONTEXT
|
|
||||||
|
|
||||||
A developer is reviewing the development branch, trying to spot which commits
|
|
||||||
need to be backported to maintenance branches. This person is already expert
|
|
||||||
on HAProxy and everything related to Git, patch management, and the risks
|
|
||||||
associated with backports, so he doesn't want to be told how to proceed nor to
|
|
||||||
review the contents of the patch.
|
|
||||||
|
|
||||||
The goal for this developer is to get some help from the AI assistant to save
|
|
||||||
some precious time on this tedious review work. In order to do a better job, he
|
|
||||||
needs an accurate summary of the information and instructions found in each
|
|
||||||
commit message. Specifically he needs to figure if the patch fixes a problem
|
|
||||||
affecting an older branch or not, if it needs to be backported, if so to which
|
|
||||||
branches, and if other patches need to be backported along with it.
|
|
||||||
|
|
||||||
The indented text block below after an "id" line and starting with a Subject line
|
|
||||||
is a commit message from the HAProxy development branch that describes a patch
|
|
||||||
applied to that branch, starting with its subject line, please read it carefully.
|
|
||||||
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
|
|
||||||
ENDINPUT
|
|
||||||
BEGININSTRUCTION
|
|
||||||
|
|
||||||
You are an AI assistant that follows instruction extremely well. Help as much
|
|
||||||
as you can, responding to a single question using a single response.
|
|
||||||
|
|
||||||
The developer wants to know if he needs to backport the patch above to fix
|
|
||||||
maintenance branches, for which branches, and what possible dependencies might
|
|
||||||
be mentioned in the commit message. Carefully study the commit message and its
|
|
||||||
backporting instructions if any (otherwise it should probably not be backported),
|
|
||||||
then provide a very concise and short summary that will help the developer decide
|
|
||||||
to backport it, or simply to skip it.
|
|
||||||
|
|
||||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
|
||||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
|
||||||
where X is a single word among:
|
|
||||||
- "yes", if you recommend to backport the patch right now either because
|
|
||||||
it explicitly states this or because it's a fix for a bug that affects
|
|
||||||
a maintenance branch (3.3 or lower);
|
|
||||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
|
||||||
only after waiting some time.
|
|
||||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
|
||||||
lack of explicit backport instructions, or it's just an improvement);
|
|
||||||
- "uncertain" otherwise for cases not covered above
|
|
||||||
|
|
||||||
ENDINSTRUCTION
|
|
||||||
|
|
||||||
Explanation:
|
|
||||||
@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2
|
|||||||
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
|
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
|
||||||
|
|
||||||
(cd "$HAPROXY_DIR"
|
(cd "$HAPROXY_DIR"
|
||||||
# avoid git pull, it chokes on forced push
|
git pull
|
||||||
git remote update origin; git reset origin/master;git checkout -f
|
|
||||||
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
|
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
|
||||||
if [ -n "$last_file" ]; then
|
if [ -n "$last_file" ]; then
|
||||||
restart=$(head -n1 "$last_file" | cut -f2 -d' ')
|
restart=$(head -n1 "$last_file" | cut -f2 -d' ')
|
||||||
|
|||||||
@ -17,9 +17,9 @@
|
|||||||
//const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504};
|
//const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504};
|
||||||
|
|
||||||
#define CODES 32
|
#define CODES 32
|
||||||
const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,414,421,422,425,429,431,500,501,502,503,504,
|
const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504,
|
||||||
/* padding entries below, which will fall back to the default code */
|
/* padding entries below, which will fall back to the default code */
|
||||||
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
|
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
|
||||||
|
|
||||||
unsigned mul, xor;
|
unsigned mul, xor;
|
||||||
unsigned bmul = 0, bxor = 0;
|
unsigned bmul = 0, bxor = 0;
|
||||||
|
|||||||
@ -1,233 +0,0 @@
|
|||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
|
|
||||||
#include <haproxy/connection-t.h>
|
|
||||||
#include <haproxy/intops.h>
|
|
||||||
|
|
||||||
struct tevt_info {
|
|
||||||
const char *loc;
|
|
||||||
const char **types;
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
/* will be sufficient for even largest flag names */
|
|
||||||
static char buf[4096];
|
|
||||||
static size_t bsz = sizeof(buf);
|
|
||||||
|
|
||||||
|
|
||||||
static const char *tevt_unknown_types[16] = {
|
|
||||||
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
|
|
||||||
[ 4] = "-", [ 5] = "-", [ 6] = "-", [ 7] = "-",
|
|
||||||
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
|
|
||||||
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *tevt_fd_types[16] = {
|
|
||||||
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
|
|
||||||
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "conn_err",
|
|
||||||
[ 8] = "intercepted", [ 9] = "conn_poll_err", [10] = "poll_err", [11] = "poll_hup",
|
|
||||||
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *tevt_hs_types[16] = {
|
|
||||||
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
|
|
||||||
[ 4] = "snd_err", [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "-",
|
|
||||||
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
|
|
||||||
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *tevt_xprt_types[16] = {
|
|
||||||
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
|
|
||||||
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
|
|
||||||
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
|
|
||||||
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *tevt_muxc_types[16] = {
|
|
||||||
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
|
|
||||||
[ 4] = "snd_err", [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "tout",
|
|
||||||
[ 8] = "goaway_rcvd", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
|
|
||||||
[12] = "graceful_shut", [13] = "-", [14] = "-", [15] = "-",
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *tevt_se_types[16] = {
|
|
||||||
[ 0] = "-", [ 1] = "shutw", [ 2] = "eos", [ 3] = "rcv_err",
|
|
||||||
[ 4] = "snd_err", [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "-",
|
|
||||||
[ 8] = "rst_rcvd", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
|
|
||||||
[12] = "cancelled", [13] = "-", [14] = "-", [15] = "-",
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *tevt_strm_types[16] = {
|
|
||||||
[ 0] = "-", [ 1] = "shutw", [ 2] = "eos", [ 3] = "rcv_err",
|
|
||||||
[ 4] = "snd_err", [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "tout",
|
|
||||||
[ 8] = "intercepted", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
|
|
||||||
[12] = "aborted", [13] = "-", [14] = "-", [15] = "-",
|
|
||||||
};
|
|
||||||
|
|
||||||
static const struct tevt_info tevt_location[26] = {
|
|
||||||
[ 0] = {.loc = "-", .types = tevt_unknown_types}, [ 1] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[ 2] = {.loc = "-", .types = tevt_unknown_types}, [ 3] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[ 4] = {.loc = "se", .types = tevt_se_types}, [ 5] = {.loc = "fd", .types = tevt_fd_types},
|
|
||||||
[ 6] = {.loc = "-", .types = tevt_unknown_types}, [ 7] = {.loc = "hs", .types = tevt_hs_types},
|
|
||||||
[ 8] = {.loc = "-", .types = tevt_unknown_types}, [ 9] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[10] = {.loc = "-", .types = tevt_unknown_types}, [11] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[12] = {.loc = "muxc", .types = tevt_muxc_types}, [13] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[14] = {.loc = "-", .types = tevt_unknown_types}, [15] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[16] = {.loc = "-", .types = tevt_unknown_types}, [17] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[18] = {.loc = "strm", .types = tevt_strm_types}, [19] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[20] = {.loc = "-", .types = tevt_unknown_types}, [21] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
[22] = {.loc = "-", .types = tevt_unknown_types}, [23] = {.loc = "xprt", .types = tevt_xprt_types},
|
|
||||||
[24] = {.loc = "-", .types = tevt_unknown_types}, [25] = {.loc = "-", .types = tevt_unknown_types},
|
|
||||||
};
|
|
||||||
|
|
||||||
void usage_exit(const char *name)
|
|
||||||
{
|
|
||||||
fprintf(stderr, "Usage: %s { value* | - }\n", name);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
char *to_upper(char *dst, const char *src)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
|
|
||||||
for (i = 0; src[i]; i++)
|
|
||||||
dst[i] = toupper(src[i]);
|
|
||||||
dst[i] = 0;
|
|
||||||
return dst;
|
|
||||||
}
|
|
||||||
|
|
||||||
char *tevt_show_events(char *buf, size_t len, const char *delim, const char *value)
|
|
||||||
{
|
|
||||||
char loc[5];
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
if (!value || !*value) {
|
|
||||||
snprintf(buf, len, "##NONE");
|
|
||||||
goto end;
|
|
||||||
}
|
|
||||||
if (strcmp(value, "-") == 0) {
|
|
||||||
snprintf(buf, len, "##UNK");
|
|
||||||
goto end;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (strlen(value) % 2 != 0) {
|
|
||||||
snprintf(buf, len, "##INV");
|
|
||||||
goto end;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (*value) {
|
|
||||||
struct tevt_info info;
|
|
||||||
char l = value[0];
|
|
||||||
char t = value[1];
|
|
||||||
|
|
||||||
if (!isalpha(l) || !isxdigit(t)) {
|
|
||||||
snprintf(buf, len, "##INV");
|
|
||||||
goto end;
|
|
||||||
}
|
|
||||||
|
|
||||||
info = tevt_location[tolower(l) - 'a'];
|
|
||||||
ret = snprintf(buf, len, "%s:%s%s",
|
|
||||||
isupper(l) ? to_upper(loc, info.loc) : info.loc,
|
|
||||||
info.types[hex2i(t)],
|
|
||||||
value[2] != 0 ? delim : "");
|
|
||||||
if (ret < 0)
|
|
||||||
break;
|
|
||||||
len -= ret;
|
|
||||||
buf += ret;
|
|
||||||
value += 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
end:
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
|
|
||||||
char *tevt_show_tuple_events(char *buf, size_t len, char *value)
|
|
||||||
{
|
|
||||||
char *p = value;
|
|
||||||
|
|
||||||
/* skip '{' */
|
|
||||||
p++;
|
|
||||||
while (*p) {
|
|
||||||
char *v;
|
|
||||||
char c;
|
|
||||||
|
|
||||||
while (*p == ' ' || *p == '\t')
|
|
||||||
p++;
|
|
||||||
|
|
||||||
v = p;
|
|
||||||
while (*p && *p != ',' && *p != '}')
|
|
||||||
p++;
|
|
||||||
c = *p;
|
|
||||||
*p = 0;
|
|
||||||
|
|
||||||
tevt_show_events(buf, len, " > ", v);
|
|
||||||
printf("\t- %s\n", buf);
|
|
||||||
|
|
||||||
*p = c;
|
|
||||||
if (*p == ',')
|
|
||||||
p++;
|
|
||||||
else if (*p == '}')
|
|
||||||
break;
|
|
||||||
else {
|
|
||||||
printf("\t- ##INV\n");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
*buf = 0;
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
|
|
||||||
int main(int argc, char **argv)
|
|
||||||
{
|
|
||||||
const char *name = argv[0];
|
|
||||||
char line[128];
|
|
||||||
char *value;
|
|
||||||
int multi = 0;
|
|
||||||
int use_stdin = 0;
|
|
||||||
char *err;
|
|
||||||
|
|
||||||
while (argc == 1)
|
|
||||||
usage_exit(name);
|
|
||||||
|
|
||||||
argv++; argc--;
|
|
||||||
if (argc > 1)
|
|
||||||
multi = 1;
|
|
||||||
|
|
||||||
if (strcmp(argv[0], "-") == 0)
|
|
||||||
use_stdin = 1;
|
|
||||||
|
|
||||||
while (argc > 0) {
|
|
||||||
if (use_stdin) {
|
|
||||||
value = fgets(line, sizeof(line), stdin);
|
|
||||||
if (!value)
|
|
||||||
break;
|
|
||||||
|
|
||||||
/* skip common leading delimiters that slip from copy-paste */
|
|
||||||
while (*value == ' ' || *value == '\t' || *value == ':' || *value == '=')
|
|
||||||
value++;
|
|
||||||
|
|
||||||
err = value;
|
|
||||||
while (*err && *err != '\n')
|
|
||||||
err++;
|
|
||||||
*err = 0;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
value = argv[0];
|
|
||||||
argv++; argc--;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (multi)
|
|
||||||
printf("### %-8s : ", value);
|
|
||||||
|
|
||||||
if (*value == '{') {
|
|
||||||
if (!use_stdin)
|
|
||||||
printf("\n");
|
|
||||||
tevt_show_tuple_events(buf, bsz, value);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
tevt_show_events(buf, bsz, " > ", value);
|
|
||||||
printf("%s\n", buf);
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
@ -3,9 +3,7 @@ DeviceAtlas Device Detection
|
|||||||
|
|
||||||
In order to add DeviceAtlas Device Detection support, you would need to download
|
In order to add DeviceAtlas Device Detection support, you would need to download
|
||||||
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
|
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
|
||||||
Once extracted, two modes are supported :
|
Once extracted :
|
||||||
|
|
||||||
1/ Build HAProxy and DeviceAtlas in one command
|
|
||||||
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
|
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
|
||||||
|
|
||||||
@ -16,6 +14,10 @@ directory. Also, in the case the api cache support is not needed and/or a C++ to
|
|||||||
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
|
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
|
||||||
|
|
||||||
|
However, if the API had been installed beforehand, DEVICEATLAS_SRC
|
||||||
|
can be omitted. Note that the DeviceAtlas C API version supported is from the 3.x
|
||||||
|
releases series (3.2.1 minimum recommended).
|
||||||
|
|
||||||
For HAProxy developers who need to verify that their changes didn't accidentally
|
For HAProxy developers who need to verify that their changes didn't accidentally
|
||||||
break the DeviceAtlas code, it is possible to build a dummy library provided in
|
break the DeviceAtlas code, it is possible to build a dummy library provided in
|
||||||
the addons/deviceatlas/dummy directory and to use it as an alternative for the
|
the addons/deviceatlas/dummy directory and to use it as an alternative for the
|
||||||
@ -25,29 +27,6 @@ validate API changes :
|
|||||||
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
|
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
|
||||||
|
|
||||||
2/ Build and install DeviceAtlas according to https://docs.deviceatlas.com/apis/enterprise/c/<release version>/README.html
|
|
||||||
|
|
||||||
For example :
|
|
||||||
In the deviceatlas library folder :
|
|
||||||
$ cmake .
|
|
||||||
$ make
|
|
||||||
$ sudo make install
|
|
||||||
|
|
||||||
In the HAProxy folder :
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1
|
|
||||||
|
|
||||||
Note that if the -DCMAKE_INSTALL_PREFIX cmake option had been used, it is necessary to set as well DEVICEATLAS_LIB and
|
|
||||||
DEVICEATLAS_INC as follow :
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=<CMAKE_INSTALL_PREFIX value>/include DEVICEATLAS_LIB=<CMAKE_INSTALL_PREFIX value>/lib
|
|
||||||
|
|
||||||
For example :
|
|
||||||
$ cmake -DCMAKE_INSTALL_PREFIX=/opt/local
|
|
||||||
$ make
|
|
||||||
$ sudo make install
|
|
||||||
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=/opt/local/include DEVICEATLAS_LIB=/opt/local/lib
|
|
||||||
|
|
||||||
Note that DEVICEATLAS_SRC is omitted in this case.
|
|
||||||
|
|
||||||
These are supported DeviceAtlas directives (see doc/configuration.txt) :
|
These are supported DeviceAtlas directives (see doc/configuration.txt) :
|
||||||
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
|
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
|
||||||
- deviceatlas-log-level <number> (0 to 3, level of information returned by
|
- deviceatlas-log-level <number> (0 to 3, level of information returned by
|
||||||
|
|||||||
@ -362,7 +362,7 @@ option set-process-time <var name>
|
|||||||
latency added by the SPOE processing for the last handled event or group.
|
latency added by the SPOE processing for the last handled event or group.
|
||||||
|
|
||||||
If several events or groups are processed for the same stream, this value
|
If several events or groups are processed for the same stream, this value
|
||||||
will be overridden.
|
will be overrideen.
|
||||||
|
|
||||||
See also: "option set-total-time".
|
See also: "option set-total-time".
|
||||||
|
|
||||||
|
|||||||
@ -3,7 +3,7 @@
|
|||||||
|
|
||||||
A number of contributors are often embarrassed with coding style issues, they
|
A number of contributors are often embarrassed with coding style issues, they
|
||||||
don't always know if they're doing it right, especially since the coding style
|
don't always know if they're doing it right, especially since the coding style
|
||||||
has evolved along the years. What is explained here is not necessarily what is
|
has elvoved along the years. What is explained here is not necessarily what is
|
||||||
applied in the code, but new code should as much as possible conform to this
|
applied in the code, but new code should as much as possible conform to this
|
||||||
style. Coding style fixes happen when code is replaced. It is useless to send
|
style. Coding style fixes happen when code is replaced. It is useless to send
|
||||||
patches to fix coding style only, they will be rejected, unless they belong to
|
patches to fix coding style only, they will be rejected, unless they belong to
|
||||||
|
|||||||
10175
doc/configuration.txt
10175
doc/configuration.txt
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
140
doc/haterm.txt
140
doc/haterm.txt
@ -1,140 +0,0 @@
|
|||||||
------
|
|
||||||
HATerm
|
|
||||||
------
|
|
||||||
HAProxy's dummy HTTP
|
|
||||||
server for benchmarks
|
|
||||||
|
|
||||||
1. Background
|
|
||||||
-------------
|
|
||||||
|
|
||||||
HATerm is a dummy HTTP server that leverages the flexible and scalable
|
|
||||||
architecture of HAProxy to ease benchmarking of HTTP agents in all versions of
|
|
||||||
HTTP currently supported by HAProxy (HTTP/1, HTTP/2, HTTP/3), and both in clear
|
|
||||||
and TLS / QUIC. It follows the same principle as its ancestor HTTPTerm [1],
|
|
||||||
consisting in producing HTTP responses entirely configured by the request
|
|
||||||
parameters (size, response time, status etc). It also preserves the spirit
|
|
||||||
HTTPTerm which does not require any configuration beyond an optional listening
|
|
||||||
address and a port number, though it also supports advanced configurations with
|
|
||||||
the full spectrum of HAProxy features for specific testing. The goal remains
|
|
||||||
to make it almost as fast as the original HTTPTerm so that it can become a
|
|
||||||
de-facto replacement, with a compatible command line and request parameters
|
|
||||||
that will not change users' habits.
|
|
||||||
|
|
||||||
[1] https://github.com/wtarreau/httpterm
|
|
||||||
|
|
||||||
|
|
||||||
2. Compilation
|
|
||||||
--------------
|
|
||||||
|
|
||||||
HATerm may be compiled in the same way as HAProxy but with "haterm" as Makefile
|
|
||||||
target to provide on the "make" command line as follows:
|
|
||||||
|
|
||||||
$ make -j $(nproc) TARGET=linux-glibc haterm
|
|
||||||
|
|
||||||
HATerm supports HTTPS/SSL/TCP:
|
|
||||||
|
|
||||||
$ make TARGET=linux-glibc USE_OPENSSL=1
|
|
||||||
|
|
||||||
It also supports QUIC:
|
|
||||||
|
|
||||||
$ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 haterm
|
|
||||||
|
|
||||||
Technically speaking, it uses the regular HAProxy source and object code with a
|
|
||||||
different command line parser. As such, all build options supported by HAProxy
|
|
||||||
also apply to HATerm. See INSTALL for more details about how to compile them.
|
|
||||||
|
|
||||||
|
|
||||||
3. Execution
|
|
||||||
------------
|
|
||||||
|
|
||||||
HATerm is a very easy to use HTTP server with supports for all the HTTP
|
|
||||||
versions. It displays its usage when run without argument or wrong arguments:
|
|
||||||
|
|
||||||
$ ./haterm
|
|
||||||
Usage : haterm -L [<ip>]:<clear port>[:<TCP&QUIC SSL port>] [-L...]* [opts]
|
|
||||||
where <opts> may be any combination of:
|
|
||||||
-G <line> : multiple option; append <line> to the "global" section
|
|
||||||
-F <line> : multiple option; append <line> to the "frontend" section
|
|
||||||
-T <line> : multiple option; append <line> to the "traces" section
|
|
||||||
-C : dump the configuration and exit
|
|
||||||
-D : goes daemon
|
|
||||||
-b <keysize> : RSA key size in bits (ex: "2048", "4096"...)
|
|
||||||
-c <curves> : ECSDA curves (ex: "P-256", "P-384"...)
|
|
||||||
-v : shows version
|
|
||||||
-d : enable the traces for all http protocols
|
|
||||||
--quic-bind-opts <opts> : append options to QUIC "bind" lines
|
|
||||||
--tcp-bind-opts <opts> : append options to TCP "bind" lines
|
|
||||||
|
|
||||||
|
|
||||||
Arguments -G, -F, -T permit to append one or multiple lines at the end of their
|
|
||||||
respective sections. A tab character ('\t') is prepended at the beginning of
|
|
||||||
the argument, and a line feed ('\n') is appended at the end. It is also
|
|
||||||
possible to insert multiple lines at once using escape sequences '\n' and '\t'
|
|
||||||
inside the string argument.
|
|
||||||
|
|
||||||
As HAProxy, HATerm may listen on several TCP/UDP addresses which can be
|
|
||||||
provided by multiple "-L" options. To be functional, it needs at least one
|
|
||||||
correct "-L" option to be set.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888 # listen on 127.0.0.1:8888 TCP address
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889 # listen on 127.0.0.1:8888 TCP address,
|
|
||||||
# 127.0.01:8889 SSL/TCP address,
|
|
||||||
# and 127.0.01:8889 QUIC/UDP address
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889 -L [::1]:8888:8889
|
|
||||||
|
|
||||||
With USE_QUIC_OPENSSL_COMPAT support, the user must configure a global
|
|
||||||
section as for HAProxy. HATerm sets internally its configuration in.
|
|
||||||
memory as this is done by HAProxy from configuration files:
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889
|
|
||||||
[NOTICE] (1371578) : haproxy version is 3.4-dev4-ba5eab-28
|
|
||||||
[NOTICE] (1371578) : path to executable is ./haterm
|
|
||||||
[ALERT] (1371578) : Binding [haterm cfgfile:12] for frontend
|
|
||||||
___haterm_frontend___: this SSL library does not
|
|
||||||
support the QUIC protocol. A limited compatibility
|
|
||||||
layer may be enabled using the "limited-quic" global
|
|
||||||
option if desired.
|
|
||||||
|
|
||||||
Such an alert may be fixed with "-G' option:
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889 -G "limited-quic"
|
|
||||||
|
|
||||||
|
|
||||||
When the SSL support is not compiled in, the second port is ignored. This is
|
|
||||||
also the case for the QUIC support.
|
|
||||||
|
|
||||||
HATerm adjusts its responses depending on the requests it receives. An empty
|
|
||||||
query string provides the information about how the URIs are understood by
|
|
||||||
HATerm:
|
|
||||||
|
|
||||||
$ curl http://127.0.0.1:8888/?
|
|
||||||
HAProxy's dummy HTTP server for benchmarks - version 3.4-dev4.
|
|
||||||
All integer argument values are in the form [digits]*[kmgr] (r=random(0..1))
|
|
||||||
The following arguments are supported to override the default objects :
|
|
||||||
- /?s=<size> return <size> bytes.
|
|
||||||
E.g. /?s=20k
|
|
||||||
- /?r=<retcode> present <retcode> as the HTTP return code.
|
|
||||||
E.g. /?r=404
|
|
||||||
- /?c=<cache> set the return as not cacheable if <1.
|
|
||||||
E.g. /?c=0
|
|
||||||
- /?A=<req-after> drain the request body after sending the response.
|
|
||||||
E.g. /?A=1
|
|
||||||
- /?C=<close> force the response to use close if >0.
|
|
||||||
E.g. /?C=1
|
|
||||||
- /?K=<keep-alive> force the response to use keep-alive if >0.
|
|
||||||
E.g. /?K=1
|
|
||||||
- /?t=<time> wait <time> milliseconds before responding.
|
|
||||||
E.g. /?t=500
|
|
||||||
- /?k=<enable> Enable transfer encoding chunked with only one chunk
|
|
||||||
if >0.
|
|
||||||
- /?R=<enable> Enable sending random data if >0.
|
|
||||||
|
|
||||||
Note that those arguments may be cumulated on one line separated by a set of
|
|
||||||
delimiters among [&?,;/] :
|
|
||||||
- GET /?s=20k&c=1&t=700&K=30r HTTP/1.0
|
|
||||||
- GET /?r=500?s=0?c=0?t=1000 HTTP/1.0
|
|
||||||
|
|
||||||
@ -5,7 +5,7 @@
|
|||||||
|
|
||||||
The buffer list API allows one to share a certain amount of buffers between
|
The buffer list API allows one to share a certain amount of buffers between
|
||||||
multiple entities, which will each see their own as lists of buffers, while
|
multiple entities, which will each see their own as lists of buffers, while
|
||||||
keeping a shared free list. The immediate use case is for muxes, which may
|
keeping a sharedd free list. The immediate use case is for muxes, which may
|
||||||
want to allocate up to a certain number of buffers per connection, shared
|
want to allocate up to a certain number of buffers per connection, shared
|
||||||
among all streams. In this case, each stream will first request a new list
|
among all streams. In this case, each stream will first request a new list
|
||||||
for its own use, then may request extra entries from the free list. At any
|
for its own use, then may request extra entries from the free list. At any
|
||||||
|
|||||||
@ -539,28 +539,15 @@ message. These functions are used by HTX analyzers or by multiplexers.
|
|||||||
with the first block not removed, or NULL if everything was removed, and
|
with the first block not removed, or NULL if everything was removed, and
|
||||||
the amount of data drained.
|
the amount of data drained.
|
||||||
|
|
||||||
- htx_xfer() transfers HTX blocks from an HTX message to another, stopping
|
- htx_xfer_blks() transfers HTX blocks from an HTX message to another,
|
||||||
when a specific amount of bytes, including meta-data, was copied. If the
|
stopping on the first block of a specified type or when a specific amount
|
||||||
tail block is a DATA block, it may be partially copied. All other block
|
of bytes, including meta-data, was moved. If the tail block is a DATA
|
||||||
are transferred at once. By default, copied blocks are removed from the
|
block, it may be partially moved. All other block are transferred at once
|
||||||
original HTX message and headers and trailers parts cannot be partially
|
or kept. This function returns a mixed value, with the last block moved,
|
||||||
copied. But flags can be set to change the default behavior:
|
or NULL if nothing was moved, and the amount of data transferred. When
|
||||||
|
HEADERS or TRAILERS blocks must be transferred, this function transfers
|
||||||
- HTX_XFER_KEEP_SRC_BLKS: source blocks are not removed
|
all of them. Otherwise, if it is not possible, it triggers an error. It is
|
||||||
- HTX_XFER_PARTIAL_HDRS_COPY: partial headers and trailers
|
the caller responsibility to transfer all headers or trailers at once.
|
||||||
part can be xferred
|
|
||||||
- HTX_XFER_HDRS_ONLY: Only the headers part is xferred
|
|
||||||
|
|
||||||
- htx_xfer_blks() [DEPRECATED] transfers HTX blocks from an HTX message to
|
|
||||||
another, stopping after the first block of a specified type is transferred
|
|
||||||
or when a specific amount of bytes, including meta-data, was moved. If the
|
|
||||||
tail block is a DATA block, it may be partially moved. All other block are
|
|
||||||
transferred at once or kept. This function returns a mixed value, with the
|
|
||||||
last block moved, or NULL if nothing was moved, and the amount of data
|
|
||||||
transferred. When HEADERS or TRAILERS blocks must be transferred, this
|
|
||||||
function transfers all of them. Otherwise, if it is not possible, it
|
|
||||||
triggers an error. It is the caller responsibility to transfer all headers
|
|
||||||
or trailers at once.
|
|
||||||
|
|
||||||
- htx_append_msg() append an HTX message to another one. All the message is
|
- htx_append_msg() append an HTX message to another one. All the message is
|
||||||
copied or nothing. So, if an error occurred, a rollback is performed. This
|
copied or nothing. So, if an error occurred, a rollback is performed. This
|
||||||
|
|||||||
@ -314,16 +314,6 @@ alphanumerically ordered:
|
|||||||
call to cfg_register_section() with the three arguments at stage
|
call to cfg_register_section() with the three arguments at stage
|
||||||
STG_REGISTER.
|
STG_REGISTER.
|
||||||
|
|
||||||
You can only register a section once, but you can register post callbacks
|
|
||||||
multiple time for this section with REGISTER_CONFIG_POST_SECTION().
|
|
||||||
|
|
||||||
- REGISTER_CONFIG_POST_SECTION(name, post)
|
|
||||||
|
|
||||||
Registers a function which will be called after a section is parsed. This is
|
|
||||||
the same as the <post> argument in REGISTER_CONFIG_SECTION(), the difference
|
|
||||||
is that it allows to register multiple <post> callbacks and to register them
|
|
||||||
elsewhere in the code.
|
|
||||||
|
|
||||||
- REGISTER_PER_THREAD_ALLOC(fct)
|
- REGISTER_PER_THREAD_ALLOC(fct)
|
||||||
|
|
||||||
Registers a call to register_per_thread_alloc(fct) at stage STG_REGISTER.
|
Registers a call to register_per_thread_alloc(fct) at stage STG_REGISTER.
|
||||||
|
|||||||
@ -1,86 +0,0 @@
|
|||||||
2025-08-13 - Memory allocation in HAProxy 3.3
|
|
||||||
|
|
||||||
The vast majority of dynamic memory allocations are performed from pools. Pools
|
|
||||||
are optimized to store pre-calibrated objects of the right size for a given
|
|
||||||
usage, try to favor locality and hot objects as much as possible, and are
|
|
||||||
heavily instrumented to detect and help debug a wide class of bugs including
|
|
||||||
buffer overflows, use-after-free, etc.
|
|
||||||
|
|
||||||
For objects of random sizes, or those used only at configuration time, pools
|
|
||||||
are not suited, and the regular malloc/free family is available, in addition of
|
|
||||||
a few others.
|
|
||||||
|
|
||||||
The standard allocation calls are intercepted at the code level (#define) when
|
|
||||||
the code is compiled with -DDEBUG_MEM_STATS. For this reason, these calls are
|
|
||||||
redefined as macros in "bug.h", and one must not try to use the pointers to
|
|
||||||
such functions, as this may break DEBUG_MEM_STATS. This provides fine-grained
|
|
||||||
stats about allocation/free per line of source code using locally implemented
|
|
||||||
counters that can be consulted by "debug dev memstats". The calls are
|
|
||||||
categorized into one of "calloc", "free", "malloc", "realloc", "strdup",
|
|
||||||
"p_alloc", "p_free", the latter two designating pools. Extra calls such as
|
|
||||||
memalign() and similar are also intercepted and counted as malloc.
|
|
||||||
|
|
||||||
Due to the nature of this replacement, DEBUG_MEM_STATS cannot see operations
|
|
||||||
performed in libraries or dependencies.
|
|
||||||
|
|
||||||
In addition to DEBUG_MEM_STATS, when haproxy is built with USE_MEMORY_PROFILING
|
|
||||||
the standard functions are wrapped by new ones defined in "activity.c", which
|
|
||||||
also hold counters by call place. These ones are able to trace activity in
|
|
||||||
libraries because the functions check the return pointer to figure where the
|
|
||||||
call was made. The approach is different and relies on a large hash table. The
|
|
||||||
files, function names and line numbers are not know, but by passing the pointer
|
|
||||||
to dladdr(), we can often resolve most of these symbols. These operations are
|
|
||||||
consulted via "show profiling memory". It must first be enabled either in the
|
|
||||||
global config "profiling.memory on" or the CLI using "set profiling memory on".
|
|
||||||
Memory profiling can also track pool allocations and frees thanks to knowing
|
|
||||||
the size of the element and knowing a place where to store it. Some future
|
|
||||||
evolutions might consider making this possible as well for pure malloc/free
|
|
||||||
too by leveraging malloc_usable_size() a bit more.
|
|
||||||
|
|
||||||
Finally, 3.3 brought aligned allocations. These are made available via a new
|
|
||||||
family of functions around ha_aligned_alloc() that simply map to either
|
|
||||||
posix_memalign(), memalign() or _aligned_malloc() for CYGWIN, depending on
|
|
||||||
which one is available. This latter one requires to pass the pointer to
|
|
||||||
_aligned_free() instead of free(), so for this reason, all aligned allocations
|
|
||||||
have to be released using ha_aligned_free(). Since this mostly happens on
|
|
||||||
configuration elements, in practice it's not as inconvenient as it can sound.
|
|
||||||
These functions are in reality macros handled in "bug.h" like the previous
|
|
||||||
ones in order to deal with DEBUG_MEM_STATS. All "alloc" variants are reported
|
|
||||||
in memstats as "malloc". All "zalloc" variants are reported in memstats as
|
|
||||||
"calloc".
|
|
||||||
|
|
||||||
The currently available allocators are the following:
|
|
||||||
|
|
||||||
- void *ha_aligned_alloc(size_t align, size_t size)
|
|
||||||
- void *ha_aligned_zalloc(size_t align, size_t size)
|
|
||||||
|
|
||||||
Equivalent of malloc() but aligned to <align> bytes. The alignment MUST be
|
|
||||||
at least as large as one word and MUST be a power of two. The "zalloc"
|
|
||||||
variant also zeroes the area on success. Both return NULL on failure.
|
|
||||||
|
|
||||||
- void *ha_aligned_alloc_safe(size_t align, size_t size)
|
|
||||||
- void *ha_aligned_zalloc_safe(size_t align, size_t size)
|
|
||||||
|
|
||||||
Equivalent of malloc() but aligned to <align> bytes. The alignment is
|
|
||||||
automatically adjusted to the nearest larger power of two that is at least
|
|
||||||
as large as a word. The "zalloc" variant also zeroes the area on
|
|
||||||
success. Both return NULL on failure.
|
|
||||||
|
|
||||||
- (type *)ha_aligned_alloc_typed(size_t count, type)
|
|
||||||
(type *)ha_aligned_zalloc_typed(size_t count, type)
|
|
||||||
|
|
||||||
This macro returns an area aligned to the required alignment for type
|
|
||||||
<type>, large enough for <count> objects of this type, and the result is a
|
|
||||||
pointer of this type. The goal is to ease allocation of known structures
|
|
||||||
whose alignment is not necessarily known to the developer (and to avoid
|
|
||||||
encouraging to hard-code alignment). The cast in return also provides a
|
|
||||||
last-minute control in case a wrong type is mistakenly used due to a poor
|
|
||||||
copy-paste or an extra "*" after the type. When DEBUG_MEM_STATS is in use,
|
|
||||||
the type is stored as a string in the ".extra" field so that it can be
|
|
||||||
displayed in "debug dev memstats". The "zalloc" variant also zeroes the
|
|
||||||
area on success. Both return NULL on failure.
|
|
||||||
|
|
||||||
- void ha_aligned_free(void *ptr)
|
|
||||||
|
|
||||||
Frees the area pointed to by ptr. It is the equivalent of free() but for
|
|
||||||
objects allocated using one of the functions above.
|
|
||||||
@ -245,30 +245,6 @@ mt_list_pop(l)
|
|||||||
#=========#
|
#=========#
|
||||||
|
|
||||||
|
|
||||||
mt_list_pop_locked(l)
|
|
||||||
Removes the list's first element, returns it locked. If the list was empty,
|
|
||||||
NULL is returned. A macro MT_LIST_POP_LOCKED() is provided for a
|
|
||||||
more convenient use; instead of returning the list element, it will return
|
|
||||||
the structure holding the element, taking care of preserving the NULL.
|
|
||||||
|
|
||||||
before:
|
|
||||||
+---+ +---+ +---+ +---+ +---+ +---+ +---+
|
|
||||||
#=>| L |<===>| A |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
|
|
||||||
# +---+ +---+ +---+ +---+ +---+ +---+ +---+ #
|
|
||||||
#=====================================================================#
|
|
||||||
|
|
||||||
after:
|
|
||||||
+---+ +---+ +---+ +---+ +---+ +---+
|
|
||||||
#=>| L |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
|
|
||||||
# +---+ +---+ +---+ +---+ +---+ +---+ #
|
|
||||||
#===========================================================#
|
|
||||||
|
|
||||||
+---+
|
|
||||||
# x| A |x #
|
|
||||||
# +---+ #
|
|
||||||
#=========#
|
|
||||||
|
|
||||||
|
|
||||||
_mt_list_lock_next(elt)
|
_mt_list_lock_next(elt)
|
||||||
Locks the link that starts at the next pointer of the designated element.
|
Locks the link that starts at the next pointer of the designated element.
|
||||||
The link is replaced by two locked pointers, and a pointer to the next
|
The link is replaced by two locked pointers, and a pointer to the next
|
||||||
@ -400,9 +376,6 @@ mt_list_lock_prev(elt)
|
|||||||
Return A elt
|
Return A elt
|
||||||
value: <===>
|
value: <===>
|
||||||
|
|
||||||
mt_list_try_lock_prev(elt)
|
|
||||||
Does the same thing as mt_list_lock_prev(), except if the list is
|
|
||||||
locked already, it returns { NULL, NULL } instead of waiting.
|
|
||||||
|
|
||||||
mt_list_lock_elem(elt)
|
mt_list_lock_elem(elt)
|
||||||
Locks the element only. Both of its pointers are replaced by two locked
|
Locks the element only. Both of its pointers are replaced by two locked
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
2025-08-11 - Pools structure and API
|
2022-02-24 - Pools structure and API
|
||||||
|
|
||||||
1. Background
|
1. Background
|
||||||
-------------
|
-------------
|
||||||
@ -204,14 +204,6 @@ the cache, when this option is set, objects are picked from the cache from the
|
|||||||
oldest one instead of the freshest one. This way even late memory corruptions
|
oldest one instead of the freshest one. This way even late memory corruptions
|
||||||
have a chance to be detected.
|
have a chance to be detected.
|
||||||
|
|
||||||
Another non-destructive approach is to use "-dMbackup". A full copy of the
|
|
||||||
object is made after its end, which eases inspection (e.g. of the parts
|
|
||||||
scratched by the pool_item elements), and a comparison is made upon allocation
|
|
||||||
of that object, just like with "-dMintegrity", causing a crash on mismatch. The
|
|
||||||
initial 4 words corresponding to the list are ignored as well. Note that when
|
|
||||||
both "-dMbackup" and "-dMintegrity" are used, the copy is performed before
|
|
||||||
being scratched, and the comparison is done by "-dMintegrity" only.
|
|
||||||
|
|
||||||
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
|
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
|
||||||
is passed on the executable's command line, pool objects are allocated with
|
is passed on the executable's command line, pool objects are allocated with
|
||||||
one extra pointer compared to the requested size, so that the bytes that follow
|
one extra pointer compared to the requested size, so that the bytes that follow
|
||||||
@ -239,6 +231,10 @@ currently in use:
|
|||||||
+------------+ +------------+ / is set at build time
|
+------------+ +------------+ / is set at build time
|
||||||
or -dMtag at boot time
|
or -dMtag at boot time
|
||||||
|
|
||||||
|
Right now no provisions are made to return objects aligned on larger boundaries
|
||||||
|
than those currently covered by malloc() (i.e. two pointers). This need appears
|
||||||
|
from time to time and the layout above might evolve a little bit if needed.
|
||||||
|
|
||||||
|
|
||||||
4. Storage in the process-wide shared pool
|
4. Storage in the process-wide shared pool
|
||||||
------------------------------------------
|
------------------------------------------
|
||||||
@ -346,25 +342,7 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
|
|||||||
"-dMno-merge" is passed on the executable's command line, the pools
|
"-dMno-merge" is passed on the executable's command line, the pools
|
||||||
also need to have the exact same name to be merged. In addition, unless
|
also need to have the exact same name to be merged. In addition, unless
|
||||||
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
|
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
|
||||||
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
|
up to the size of pointers (16 or 32 bytes). The name that will appear
|
||||||
per-pool basis to enable the UAF detection only for this specific pool,
|
|
||||||
saving the massive overhead of global usage. The name that will appear
|
|
||||||
in the pool upon merging is the name of the first created pool. The
|
|
||||||
returned pointer is the new (or reused) pool head, or NULL upon error.
|
|
||||||
Pools created this way must be destroyed using pool_destroy().
|
|
||||||
|
|
||||||
struct pool_head *create_aligned_pool(char *name, uint size, uint align, uint flags)
|
|
||||||
Create a new pool named <name> for objects of size <size> bytes and
|
|
||||||
aligned to <align> bytes (0 meaning use the platform's default). Pool
|
|
||||||
names are truncated to their first 11 characters. Pools of very similar
|
|
||||||
size will usually be merged if both have set the flag MEM_F_SHARED in
|
|
||||||
<flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
|
|
||||||
"-dMno-merge" is passed on the executable's command line, the pools
|
|
||||||
also need to have the exact same name to be merged. In addition, unless
|
|
||||||
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
|
|
||||||
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
|
|
||||||
per-pool basis to enable the UAF detection only for this specific pool,
|
|
||||||
saving the massive overhead of global usage. The name that will appear
|
|
||||||
in the pool upon merging is the name of the first created pool. The
|
in the pool upon merging is the name of the first created pool. The
|
||||||
returned pointer is the new (or reused) pool head, or NULL upon error.
|
returned pointer is the new (or reused) pool head, or NULL upon error.
|
||||||
Pools created this way must be destroyed using pool_destroy().
|
Pools created this way must be destroyed using pool_destroy().
|
||||||
@ -482,20 +460,6 @@ complicate maintenance.
|
|||||||
|
|
||||||
A few macros exist to ease the declaration of pools:
|
A few macros exist to ease the declaration of pools:
|
||||||
|
|
||||||
DECLARE_ALIGNED_POOL(ptr, name, size, align)
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element, all
|
|
||||||
of which will be aligned to <align> bytes. The alignment will be
|
|
||||||
rounded up to the next power of two and will be at least as large as a
|
|
||||||
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
|
|
||||||
and by assigning the resulting pointer to variable <ptr>. <ptr> will be
|
|
||||||
created of type "struct pool_head *". If the pool needs to be visible
|
|
||||||
outside of the function (which is likely), it will also need to be
|
|
||||||
declared somewhere as "extern struct pool_head *<ptr>;". It is
|
|
||||||
recommended to place such declarations very early in the source file so
|
|
||||||
that the variable is already known to all subsequent functions which
|
|
||||||
may use it.
|
|
||||||
|
|
||||||
DECLARE_POOL(ptr, name, size)
|
DECLARE_POOL(ptr, name, size)
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
Placed at the top level of a file, this declares a global memory pool
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element. This
|
as variable <ptr>, name <name> and size <size> bytes per element. This
|
||||||
@ -507,17 +471,6 @@ DECLARE_POOL(ptr, name, size)
|
|||||||
declarations very early in the source file so that the variable is
|
declarations very early in the source file so that the variable is
|
||||||
already known to all subsequent functions which may use it.
|
already known to all subsequent functions which may use it.
|
||||||
|
|
||||||
DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align)
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element, all
|
|
||||||
of which will be aligned to <align> bytes. The alignment will be
|
|
||||||
rounded up to the next power of two and will be at least as large as a
|
|
||||||
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
|
|
||||||
and by assigning the resulting pointer to local variable <ptr>. <ptr>
|
|
||||||
will be created of type "static struct pool_head *". It is recommended
|
|
||||||
to place such declarations very early in the source file so that the
|
|
||||||
variable is already known to all subsequent functions which may use it.
|
|
||||||
|
|
||||||
DECLARE_STATIC_POOL(ptr, name, size)
|
DECLARE_STATIC_POOL(ptr, name, size)
|
||||||
Placed at the top level of a file, this declares a static memory pool
|
Placed at the top level of a file, this declares a static memory pool
|
||||||
as variable <ptr>, name <name> and size <size> bytes per element. This
|
as variable <ptr>, name <name> and size <size> bytes per element. This
|
||||||
@ -527,42 +480,6 @@ DECLARE_STATIC_POOL(ptr, name, size)
|
|||||||
early in the source file so that the variable is already known to all
|
early in the source file so that the variable is already known to all
|
||||||
subsequent functions which may use it.
|
subsequent functions which may use it.
|
||||||
|
|
||||||
DECLARE_STATIC_TYPED_POOL(ptr, name, type[, extra[, align]])
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name>, and configured to allocate objects of
|
|
||||||
type <type>. It is optionally possible to grow these objects by <extra>
|
|
||||||
bytes (e.g. if they contain some variable length data at the end), and
|
|
||||||
to force them to be aligned to <align> bytes. If only alignment is
|
|
||||||
desired without extra data, pass 0 as <extra>. Alignment must be at
|
|
||||||
least as large as the type's, and a control is enforced at declaration
|
|
||||||
time so that objects cannot be less aligned than what is promised to
|
|
||||||
the compiler. The default alignment of zero indicates that the default
|
|
||||||
one (from the type) should be used. This is made via a call to
|
|
||||||
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to local
|
|
||||||
variable <ptr>. <ptr> will be created of type "static struct pool_head
|
|
||||||
*". It is recommended to place such declarations very early in the
|
|
||||||
source file so that the variable is already known to all subsequent
|
|
||||||
functions which may use it.
|
|
||||||
|
|
||||||
DECLARE_TYPED_POOL(ptr, name, type[, extra[, align]])
|
|
||||||
Placed at the top level of a file, this declares a global memory pool
|
|
||||||
as variable <ptr>, name <name>, and configured to allocate objects of
|
|
||||||
type <type>. It is optionally possible to grow these objects by <extra>
|
|
||||||
bytes (e.g. if they contain some variable length data at the end), and
|
|
||||||
to force them to be aligned to <align> bytes. If only alignment is
|
|
||||||
desired without extra data, pass 0 as <extra>. Alignment must be at
|
|
||||||
least as large as the type's, and a control is enforced at declaration
|
|
||||||
time so that objects cannot be less aligned than what is promised to
|
|
||||||
the compiler. The default alignment of zero indicates that the default
|
|
||||||
one (from the type) should be used. This is made via a call to
|
|
||||||
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to
|
|
||||||
variable <ptr>. <ptr> will be created of type "struct pool_head *". If
|
|
||||||
the pool needs to be visible outside of the function (which is likely),
|
|
||||||
it will also need to be declared somewhere as "extern struct pool_head
|
|
||||||
*<ptr>;". It is recommended to place such declarations very early in
|
|
||||||
the source file so that the variable is already known to all subsequent
|
|
||||||
functions which may use it.
|
|
||||||
|
|
||||||
|
|
||||||
6. Build options
|
6. Build options
|
||||||
----------------
|
----------------
|
||||||
|
|||||||
@ -98,37 +98,19 @@ void task_set_thread(t, id)
|
|||||||
indicate "any thread". It's ignored and replaced by zero when threads
|
indicate "any thread". It's ignored and replaced by zero when threads
|
||||||
are disabled.
|
are disabled.
|
||||||
|
|
||||||
void tasklet_wakeup(tl, [flags])
|
void tasklet_wakeup(tl)
|
||||||
Make sure that tasklet <tl> will wake up, that is, will execute at
|
Make sure that tasklet <tl> will wake up, that is, will execute at
|
||||||
least once. The tasklet will run on its assigned thread, or on any
|
least once. The tasklet will run on its assigned thread, or on any
|
||||||
thread if its TID is negative. An optional <flags> value may be passed
|
thread if its TID is negative.
|
||||||
to set a wakeup cause on the tasklet's flags, typically TASK_WOKEN_* or
|
|
||||||
TASK_F_UEVT*. When not set, 0 is passed (i.e. no flags are changed).
|
|
||||||
|
|
||||||
struct list *tasklet_wakeup_after(head, tl, [flags])
|
void tasklet_wakeup_on(tl, thr)
|
||||||
Schedule tasklet <tl> to run immediately the current one if <head> is
|
|
||||||
NULL, or after the last queued one if <head> is non-null. The new head
|
|
||||||
is returned, to be passed to the next call. The purpose here is to
|
|
||||||
permit instant wakeups of resumed tasklets that still preserve
|
|
||||||
ordering between them. A typical use case is for a mux' I/O handler to
|
|
||||||
instantly wake up a series of urgent streams before continuing with
|
|
||||||
already queued tasklets. This may induce extra latencies for pending
|
|
||||||
jobs and must only be used extremely carefully when it's certain that
|
|
||||||
the processing will benefit from using fresh data from the L1 cache.
|
|
||||||
An optional <flags> value may be passed to set a wakeup cause on the
|
|
||||||
tasklet's flags, typically TASK_WOKEN_* or TASK_F_UEVT*. When not set,
|
|
||||||
0 is passed (i.e. no flags are changed).
|
|
||||||
|
|
||||||
void tasklet_wakeup_on(tl, thr, [flags])
|
|
||||||
Make sure that tasklet <tl> will wake up on thread <thr>, that is, will
|
Make sure that tasklet <tl> will wake up on thread <thr>, that is, will
|
||||||
execute at least once. The designated thread may only differ from the
|
execute at least once. The designated thread may only differ from the
|
||||||
calling one if the tasklet is already configured to run on another
|
calling one if the tasklet is already configured to run on another
|
||||||
thread, and it is not permitted to self-assign a tasklet if its tid is
|
thread, and it is not permitted to self-assign a tasklet if its tid is
|
||||||
negative, as it may already be scheduled to run somewhere else. Just in
|
negative, as it may already be scheduled to run somewhere else. Just in
|
||||||
case, only use tasklet_wakeup() which will pick the tasklet's assigned
|
case, only use tasklet_wakeup() which will pick the tasklet's assigned
|
||||||
thread ID. An optional <flags> value may be passed to set a wakeup
|
thread ID.
|
||||||
cause on the tasklet's flags, typically TASK_WOKEN_* or TASK_F_UEVT*.
|
|
||||||
When not set, 0 is passed (i.e. no flags are changed).
|
|
||||||
|
|
||||||
struct tasklet *tasklet_new()
|
struct tasklet *tasklet_new()
|
||||||
Allocate a new tasklet and set it to run by default on the calling
|
Allocate a new tasklet and set it to run by default on the calling
|
||||||
@ -215,14 +197,6 @@ state field before the call to ->process()
|
|||||||
|
|
||||||
- TASK_WOKEN_OTHER any other application-defined wake-up reason.
|
- TASK_WOKEN_OTHER any other application-defined wake-up reason.
|
||||||
|
|
||||||
- TASK_F_UEVT1 one-shot user-defined event type 1. This is application
|
|
||||||
specific, and reset to 0 when the handler is called.
|
|
||||||
|
|
||||||
- TASK_F_UEVT2 one-shot user-defined event type 2. This is application
|
|
||||||
specific, and reset to 0 when the handler is called.
|
|
||||||
|
|
||||||
- TASK_F_UEVT3 one-shot user-defined event type 3. This is application
|
|
||||||
specific, and reset to 0 when the handler is called.
|
|
||||||
|
|
||||||
In addition, a few persistent flags may be observed or manipulated by the
|
In addition, a few persistent flags may be observed or manipulated by the
|
||||||
application, both for tasks and tasklets:
|
application, both for tasks and tasklets:
|
||||||
|
|||||||
@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
|
|||||||
At this time HAProxy still had a multi-process model, and the way haproxy is
|
At this time HAProxy still had a multi-process model, and the way haproxy is
|
||||||
working was incompatible with the daemon mode.
|
working was incompatible with the daemon mode.
|
||||||
|
|
||||||
Systemd is compatible with traditional forking services, but somehow HAProxy
|
Systemd is compatible with traditionnal forking services, but somehow HAProxy
|
||||||
is different. To work correctly, systemd needs a main PID, this is the PID of
|
is different. To work correctly, systemd needs a main PID, this is the PID of
|
||||||
the process that systemd will supervises.
|
the process that systemd will supervises.
|
||||||
|
|
||||||
@ -45,7 +45,7 @@ However the wrapper suffered from several problems:
|
|||||||
|
|
||||||
### mworker V1
|
### mworker V1
|
||||||
|
|
||||||
HAProxy 1.8 got rid of the wrapper which was replaced by the master worker
|
HAProxy 1.8 got ride of the wrapper which was replaced by the master worker
|
||||||
mode. This first version was basically a reintegration of the wrapper features
|
mode. This first version was basically a reintegration of the wrapper features
|
||||||
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
|
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
|
||||||
then fork. In mworker mode, the master is usually launched as a root process,
|
then fork. In mworker mode, the master is usually launched as a root process,
|
||||||
@ -86,7 +86,7 @@ retrieved automatically.
|
|||||||
The master is supervising the workers, when a current worker (not a previous one
|
The master is supervising the workers, when a current worker (not a previous one
|
||||||
from before the reload) is exiting without being asked for a reload, the master
|
from before the reload) is exiting without being asked for a reload, the master
|
||||||
will emit an "exit-on-failure" error and will kill every workers with a SIGTERM
|
will emit an "exit-on-failure" error and will kill every workers with a SIGTERM
|
||||||
and exits with the same error code than the failed worker, this behavior can be
|
and exits with the same error code than the failed master, this behavior can be
|
||||||
changed by using the "no exit-on-failure" option in the global section.
|
changed by using the "no exit-on-failure" option in the global section.
|
||||||
|
|
||||||
While the master is supervising the workers using the wait() function, the
|
While the master is supervising the workers using the wait() function, the
|
||||||
@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
|
|||||||
in the configuration is less useful and everything can be done from the master
|
in the configuration is less useful and everything can be done from the master
|
||||||
CLI.
|
CLI.
|
||||||
|
|
||||||
With 2.7, the reload mechanism of the master CLI evolved, with previous versions,
|
With 2.7, the reload mecanism of the master CLI evolved, with previous versions,
|
||||||
this mechanism was asynchronous, so once the `reload` command was received, the
|
this mecanism was asynchronous, so once the `reload` command was received, the
|
||||||
master would reload, the active master CLI connection was closed, and there was
|
master would reload, the active master CLI connection was closed, and there was
|
||||||
no way to return a status as a response to the `reload` command. To achieve a
|
no way to return a status as a response to the `reload` command. To achieve a
|
||||||
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
|
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
|
||||||
@ -208,38 +208,3 @@ starts with -st to achieve a hard stop on the previous worker.
|
|||||||
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
|
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
|
||||||
events of xz/openssh, the function is now implemented directly in haproxy in
|
events of xz/openssh, the function is now implemented directly in haproxy in
|
||||||
src/systemd.c.
|
src/systemd.c.
|
||||||
|
|
||||||
### mworker V3
|
|
||||||
|
|
||||||
This version was implemented with HAProxy 3.1, the goal was to stop parsing and
|
|
||||||
applying the configuration in the master process.
|
|
||||||
|
|
||||||
One of the caveats of the previous implementation was that the parser could take
|
|
||||||
a lot of time, and the master process would be stuck in the parser instead of
|
|
||||||
handling its polling loop, signals etc. Some parts of the configuration parsing
|
|
||||||
could also be less reliable with third-party code (EXTRA_OBJS), it could, for
|
|
||||||
example, allow opening FDs and not closing them before the reload which
|
|
||||||
would crash the master after a few reloads.
|
|
||||||
|
|
||||||
The startup of the master-worker was reorganized this way:
|
|
||||||
|
|
||||||
- the "discovery" mode, which is a lighter configuration parsing step, only
|
|
||||||
applies the configuration which needs to be effective for the master process.
|
|
||||||
For example, "master-worker", "mworker-max-reloads" and less than 20 other
|
|
||||||
keywords that are identified by KWF_DISCOVERY in the code. It is really fast
|
|
||||||
as it doesn't need all the configuration to be applied in the master process.
|
|
||||||
|
|
||||||
- the master will then fork a worker, with a PROC_O_INIT flag. This worker has
|
|
||||||
a temporary sockpair connected to the master CLI. Once the worker is forked,
|
|
||||||
the master initializes its configuration and starts its polling loop.
|
|
||||||
|
|
||||||
- The newly forked worker will try to parse the configuration, which could
|
|
||||||
result in a failure (exit 1), or any bad error code. In case of success, the
|
|
||||||
worker will send a "READY" message to the master CLI then close this FD. At
|
|
||||||
this step everything was initialized and the worker can enter its polling
|
|
||||||
loop.
|
|
||||||
|
|
||||||
- The master then waits for the worker, it could:
|
|
||||||
* receive the READY message over the mCLI, resulting in a successful loading
|
|
||||||
of haproxy
|
|
||||||
* receive a SIGCHLD, meaning the worker exited and couldn't load
|
|
||||||
|
|||||||
@ -114,7 +114,7 @@ SHUT RDY ACT
|
|||||||
1 1 1 => shut pending
|
1 1 1 => shut pending
|
||||||
|
|
||||||
PB: we can land into final shut if one thread disables the FD while another
|
PB: we can land into final shut if one thread disables the FD while another
|
||||||
one that was waiting on it reports it as shut. Theoretically it should be
|
one that was waiting on it reports it as shut. Theorically it should be
|
||||||
implicitly ready though, since reported. But if no data is reported, it
|
implicitly ready though, since reported. But if no data is reported, it
|
||||||
will be reportedly shut only. And no event will be reported then. This
|
will be reportedly shut only. And no event will be reported then. This
|
||||||
might still make sense since it's not active, thus we don't want events.
|
might still make sense since it's not active, thus we don't want events.
|
||||||
|
|||||||
@ -1,53 +0,0 @@
|
|||||||
2025/09/16 - SHM stats file storage description and hints
|
|
||||||
|
|
||||||
Shm stats file (used to share thread-groupable statistics over multiple
|
|
||||||
process through the "shm-stats-file" directive) is made of:
|
|
||||||
|
|
||||||
- a main header which describes the file version, the processes making
|
|
||||||
use of it, the common clock source and hints about the number of
|
|
||||||
objects that are currently stored or provisioned in the file.
|
|
||||||
- an indefinite number of "objects" blocks coming right after the
|
|
||||||
main header, all blocks have the same size which is the size of the
|
|
||||||
maximum underlying object that may be stored. The main header tells
|
|
||||||
how many objects are stored in the file.
|
|
||||||
|
|
||||||
File header looks like this (32/64 bits systems):
|
|
||||||
|
|
||||||
0 8 16 32 48 64
|
|
||||||
+-------+---------+----------------+-------------------+-------------------+
|
|
||||||
| VERSION | 2 bytes | global_now_ms (global mono date in ms)|
|
|
||||||
|MAJOR | MINOR | hole | |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| global_now_ns (global mono date in ns) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| now_offset (offset applied to global monotonic date |
|
|
||||||
| on startup) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| Process slot : | 1byte x 64
|
|
||||||
| pid | heartbeat (ticks) |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| objects | objects slots |
|
|
||||||
| (used objects) | (available for use) |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| padding (for future use) | 128 bytes
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
|
|
||||||
Object block looks like this:
|
|
||||||
|
|
||||||
0 8 16 32 48 64
|
|
||||||
+-------+---------+----------------+-------------------+-------------------+
|
|
||||||
| GUID | 128 bytes
|
|
||||||
+ (zero terminated) +
|
|
||||||
| |
|
|
||||||
+-------+---------+--------------------------------------------------------+
|
|
||||||
| tgid | type | padding |
|
|
||||||
+-------+---------+--------------------------------------------------------+
|
|
||||||
| users (bitmask of process slots making use of the obj) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| object data |
|
|
||||||
| (version dependent) |
|
|
||||||
| struct be_counters_shared_tg or |
|
|
||||||
| struct fe_counters_shared_tg |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| padding (to anticipate evolutions) | 64 bytes
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
@ -1,50 +0,0 @@
|
|||||||
2026-03-12 - thread execution context
|
|
||||||
|
|
||||||
Thread execution context (thread_exec_ctx) is a combination of type and pointer
|
|
||||||
that are set in the current running thread at th_ctx->exec_ctx when entering
|
|
||||||
certain processing (tasks, sample fetch functions, actions, CLI keywords etc).
|
|
||||||
They're refined along execution, so that a task such as process_stream could
|
|
||||||
temporarily switch to a converter while evaluating an expression and switch
|
|
||||||
back to process_stream. They are reported in thread dumps and are mixed with
|
|
||||||
caller locations for memory profiling. As such they are intentionally not too
|
|
||||||
precise in order to avoid an explosion of the number of buckets. At the moment,
|
|
||||||
the level of granularity it provides is sufficient to try to narrow a
|
|
||||||
misbehaving origin down to a list of keywords. The context types can currently
|
|
||||||
be:
|
|
||||||
|
|
||||||
- something registered via an initcall, with the initcall's location
|
|
||||||
- something registered via an ha_caller, with the caller's location
|
|
||||||
- an explicit sample fetch / converter / action / CLI keyword list
|
|
||||||
- an explicit function (mainly used for actions without keywords)
|
|
||||||
- a task / tasklet (no distinction is made), using the ->process pointer
|
|
||||||
- a filter (e.g. compression), via flt_conf, reporting name
|
|
||||||
- a mux (via the mux_ops, reporting the name)
|
|
||||||
- an applet (e.g. cache, stats, CLI)
|
|
||||||
|
|
||||||
A macro EXEC_CTX_MAKE(type, pointer) makes a thread_exec_ctx from such
|
|
||||||
values.
|
|
||||||
|
|
||||||
A macro EXEC_CTX_NO_RET(ctx, statement) calls a void statement under the
|
|
||||||
specified context.
|
|
||||||
|
|
||||||
A macro EXEC_CTX_WITH_RET(ctx, expr) calls an expression under the specified
|
|
||||||
context.
|
|
||||||
|
|
||||||
Most locations were modified to directly use these macros on the fly, by
|
|
||||||
retrieving the context from where it was set on the element being evaluated
|
|
||||||
(e.g. an action rule contains the context inherited by the action keyword
|
|
||||||
that was used to create it).
|
|
||||||
|
|
||||||
In tools.c, chunk_append_thread_ctx() tries to decode the given exec_ctx and
|
|
||||||
appends it into the provided buffer. It's used by ha_thread_dump_one() and
|
|
||||||
cli_io_handler_show_activity() for memory profiling. In this latter case,
|
|
||||||
the detected thread_ctx are reported in the output under brackets prefixed
|
|
||||||
with "[via ...]" to distinguish call paths to the same allocators.
|
|
||||||
|
|
||||||
A good way to test if a context is properly reported is to place a bleeding
|
|
||||||
malloc() call into one of the monitored functions, e.g.:
|
|
||||||
|
|
||||||
DISGUISE(malloc(8));
|
|
||||||
|
|
||||||
and issue "show profiling memory" after stressing the function. Its context
|
|
||||||
must appear on the right with the number of calls.
|
|
||||||
@ -1,144 +0,0 @@
|
|||||||
2025-02-13 - Details of the watchdog's internals
|
|
||||||
------------------------------------------------
|
|
||||||
|
|
||||||
1. The watchdog timer
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
The watchdog sets up a timer that triggers every 1 to 1000ms. This is pre-
|
|
||||||
initialized by init_wdt() which positions wdt_handler() as the signal handler
|
|
||||||
of signal WDTSIG (SIGALRM).
|
|
||||||
|
|
||||||
But this is not sufficient, an alarm actually has to be set. This is done for
|
|
||||||
each thread by init_wdt_per_thread() which calls clock_setup_signal_timer()
|
|
||||||
which in turn enables a ticking timer for the current thread, that delivers
|
|
||||||
the WDTSIG signal (SIGALRM) to the process. Since there's no notion of thread
|
|
||||||
at this point, there are as many timers as there are threads, and each signal
|
|
||||||
comes with an integer value which in fact contains the thread number as passed
|
|
||||||
to clock_setup_signal_timer() during initialization.
|
|
||||||
|
|
||||||
The timer preferably uses CLOCK_THREAD_CPUTIME_ID if available, otherwise
|
|
||||||
falls back to CLOCK_REALTIME. The former is more accurate as it really counts
|
|
||||||
the time spent in the process, while the latter might also account for time
|
|
||||||
stuck on paging in etc.
|
|
||||||
|
|
||||||
Then wdt_ping() is called to arm the timer. It's set to trigger every
|
|
||||||
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
|
|
||||||
to reprogram a new wakeup after it has ticked.
|
|
||||||
|
|
||||||
When wdt_handler() is called, it reads the thread number in si_value.sival_int,
|
|
||||||
as positioned during initialization. Most of the time the signal lands on the
|
|
||||||
wrong thread (typically thread 1 regardless of the reported thread). From this
|
|
||||||
point, the function retrieves the various info related to that thread's recent
|
|
||||||
activity (its current time and flags), ignores corner cases such as if that
|
|
||||||
thread is already dumping another one, being dumped, in the poller, has quit,
|
|
||||||
etc.
|
|
||||||
|
|
||||||
If the thread was not marked as stuck, it's verified that no progress was made
|
|
||||||
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
|
|
||||||
progress is measured by the distance between the thread's current cpu_time and
|
|
||||||
its prev_cpu_time. If the lack of progress is at least as large as the warning
|
|
||||||
threshold, then the signal is bounced to the faulty thread if it's not the
|
|
||||||
current one. Since this bounce is based on the time spent without update, it
|
|
||||||
already doesn't happen often.
|
|
||||||
|
|
||||||
Once on the faulty thread, two checks are performed:
|
|
||||||
1) if the thread was already marked as stuck, then the thread is considered
|
|
||||||
as definitely stuck, and ha_panic() is called. It will not return.
|
|
||||||
|
|
||||||
2) a check is made to verify if the scheduler is still ticking, by reading
|
|
||||||
and setting a variable that only the scheduler can clear when leaving a
|
|
||||||
task. If the scheduler didn't make any progress, ha_stuck_warning() is
|
|
||||||
called to emit a warning about that thread.
|
|
||||||
|
|
||||||
Most of the time there's no panic of course, and a wdt_ping() is performed
|
|
||||||
before leaving the handler to reprogram a check for that thread.
|
|
||||||
|
|
||||||
2. The debug handler
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
Both ha_panic() and ha_stuck_warning() are quite similar. In both cases, they
|
|
||||||
will first verify that no panic is in progress and just return if so. This is
|
|
||||||
verified using mark_tainted() which atomically sets a tainted bit and returns
|
|
||||||
the previous value. ha_panic() sets TAINTED_PANIC while ha_stuck_warning() will
|
|
||||||
set TAINTED_WARN_BLOCKED_TRAFFIC.
|
|
||||||
|
|
||||||
ha_panic() uses the current thread's trash buffer to produce the messages, as
|
|
||||||
we don't care about its contents since that thread will never return. However
|
|
||||||
ha_stuck_warning() instead uses a local 8kB buffer in the thread's stack.
|
|
||||||
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
|
|
||||||
buffer being filled with each thread's dump messages. ha_stuck_warning() only
|
|
||||||
calls ha_thread_dump_one(), which works on the current thread. In both cases
|
|
||||||
the message is then directly sent to fd #2 (stderr) and ha_thread_dump_done()
|
|
||||||
is called to release the dumped thread.
|
|
||||||
|
|
||||||
Both print a few extra messages, but ha_panic() just ends by looping on abort()
|
|
||||||
until the process dies.
|
|
||||||
|
|
||||||
ha_thread_dump_fill() uses a locking mechanism to make sure that each thread is
|
|
||||||
only dumped once at a time. For this it atomically sets its thread_dump_buffer
|
|
||||||
to point to the target buffer. The thread_dump_buffer has 4 possible values:
|
|
||||||
- NULL: no dump in progress
|
|
||||||
- a valid, even, pointer: this is the pointer to the buffer that's currently
|
|
||||||
in the process of being filled by the thread
|
|
||||||
- a valid pointer + 1: this is the pointer of the now filled buffer, that the
|
|
||||||
caller can consume. The atomic |1 at the end marks the end of the dump.
|
|
||||||
- 0x2: this indicates to the dumping function that it is responsible for
|
|
||||||
assigning its own buffer itself (used by the debug_handler to pick one of
|
|
||||||
its own trash buffers during a panic). The idea here is that each thread
|
|
||||||
will keep their own copy of their own dump so that it can be later found in
|
|
||||||
the core file for inspection.
|
|
||||||
|
|
||||||
A copy of the last valid thread_dump_buffer used is kept in last_dump_buffer,
|
|
||||||
for easier post-mortem analysis. This one may be NULL or even invalid, but
|
|
||||||
usually during a panic it will be valid, and may reveal useful hints even if it
|
|
||||||
still contains the dump of the last warning. Usually this will point to a trash
|
|
||||||
buffer or to stack area.
|
|
||||||
|
|
||||||
ha_thread_dump_fill() then either directly calls ha_thread_dump_one() if the
|
|
||||||
target thread is the current thread, or sends the target thread DEBUGSIG
|
|
||||||
(SIGURG) if it's a different thread. This signal is initialized at boot time
|
|
||||||
by init_debug() to call handler debug_handler().
|
|
||||||
|
|
||||||
debug_handler() then operates on the target thread and recognizes that it must
|
|
||||||
allocate its own buffer if the pointer is 0x2, calls ha_thread_dump_one(), then
|
|
||||||
waits forever (it does not return from the signal handler so as to make sure
|
|
||||||
the dumped thread will not badly interact with other ones).
|
|
||||||
|
|
||||||
ha_thread_dump_one() collects some info, that it prints all along into the
|
|
||||||
target buffer. Depending on the situation, it will dump current tasks or not,
|
|
||||||
may mark that Lua is involved and TAINTED_LUA_STUCK, and if running in shared
|
|
||||||
mode, also taint the process with TAINTED_LUA_STUCK_SHARED. It calls
|
|
||||||
ha_dump_backtrace() before returning.
|
|
||||||
|
|
||||||
ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
|
|
||||||
then dumps the code bytes near the crashing instruction, dumps pointers and
|
|
||||||
tries to resolve function names, and sends all of that into the target buffer.
|
|
||||||
On some architectures (x86_64, arm64), it will also try to detect and decode
|
|
||||||
call instructions and resolve them to called functions.
|
|
||||||
|
|
||||||
3. Improvements
|
|
||||||
---------------
|
|
||||||
|
|
||||||
The symbols resolution is extremely expensive, particularly for the warnings
|
|
||||||
which should be fast. But we need it, it's just unfortunate that it strikes at
|
|
||||||
the wrong moment. At least ha_dump_backtrace() does disable signals while it's
|
|
||||||
resolving, in order to avoid unwanted re-entrance. In addition, the called
|
|
||||||
function resolve_sym_name() uses some locking and refrains from calling the
|
|
||||||
dladdr family of functions in a re-entrant way (in the worst case only well
|
|
||||||
known symbols will be resolved).
|
|
||||||
|
|
||||||
In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
|
|
||||||
which would then later be resolved asynchronously in a tasklet. This can work
|
|
||||||
because the code bytes will not change either so the dump can be done at once
|
|
||||||
there.
|
|
||||||
|
|
||||||
However the tasks dumps are not much compatible with this. For example
|
|
||||||
ha_task_dump() makes a number of tests and itself will call hlua_traceback() if
|
|
||||||
needed, so it might still need to be dumped in real time synchronously and
|
|
||||||
buffered. But then it's difficult to reassemble chunks of text between the
|
|
||||||
backtrace (that needs to be resolved later) and the tasks/lua parts. Or maybe
|
|
||||||
we can afford to disable Lua trace dumps in warnings and keep them only for
|
|
||||||
panics (where the asynchronous resolution is not needed) ?
|
|
||||||
|
|
||||||
Also differentiating the call paths for warnings and panics is not something
|
|
||||||
easy either.
|
|
||||||
@ -1,7 +1,7 @@
|
|||||||
-----------------------
|
-----------------------
|
||||||
HAProxy Starter Guide
|
HAProxy Starter Guide
|
||||||
-----------------------
|
-----------------------
|
||||||
version 3.4
|
version 3.1
|
||||||
|
|
||||||
|
|
||||||
This document is an introduction to HAProxy for all those who don't know it, as
|
This document is an introduction to HAProxy for all those who don't know it, as
|
||||||
@ -1693,7 +1693,7 @@ A small team of trusted developers will receive it and will be able to propose
|
|||||||
a fix. We usually don't use embargoes and once a fix is available it gets
|
a fix. We usually don't use embargoes and once a fix is available it gets
|
||||||
merged. In some rare circumstances it can happen that a release is coordinated
|
merged. In some rare circumstances it can happen that a release is coordinated
|
||||||
with software vendors. Please note that this process usually messes up with
|
with software vendors. Please note that this process usually messes up with
|
||||||
everyone's work, and that rushed up releases can sometimes introduce new bugs,
|
eveyone's work, and that rushed up releases can sometimes introduce new bugs,
|
||||||
so it's best avoided unless strictly necessary; as such, there is often little
|
so it's best avoided unless strictly necessary; as such, there is often little
|
||||||
consideration for reports that needlessly cause such extra burden, and the best
|
consideration for reports that needlessly cause such extra burden, and the best
|
||||||
way to see your work credited usually is to provide a working fix, which will
|
way to see your work credited usually is to provide a working fix, which will
|
||||||
|
|||||||
@ -348,20 +348,8 @@ Core class
|
|||||||
end
|
end
|
||||||
..
|
..
|
||||||
|
|
||||||
.. js:function:: core.get_patref(name)
|
|
||||||
|
|
||||||
**context**: init, task, action, sample-fetch, converter
|
|
||||||
|
|
||||||
Find the pattern object *name* used by HAProxy. It corresponds to the
|
|
||||||
generic pattern reference used to handle both ACL ands Maps.
|
|
||||||
|
|
||||||
:param string name: reference name
|
|
||||||
:returns: A :ref:`patref_class` object.
|
|
||||||
|
|
||||||
.. js:function:: core.add_acl(name, key)
|
.. js:function:: core.add_acl(name, key)
|
||||||
|
|
||||||
**LEGACY**
|
|
||||||
|
|
||||||
**context**: init, task, action, sample-fetch, converter
|
**context**: init, task, action, sample-fetch, converter
|
||||||
|
|
||||||
Add the ACL *key* in the ACLs list referenced by *name*.
|
Add the ACL *key* in the ACLs list referenced by *name*.
|
||||||
@ -369,14 +357,8 @@ Core class
|
|||||||
:param string name: the name that reference the ACL entries.
|
:param string name: the name that reference the ACL entries.
|
||||||
:param string key: the key which will be added.
|
:param string key: the key which will be added.
|
||||||
|
|
||||||
.. Note::
|
|
||||||
This function is not optimal due to systematic Map reference lookup.
|
|
||||||
It is recommended to use :js:func:`Patref.add()` instead.
|
|
||||||
|
|
||||||
.. js:function:: core.del_acl(name, key)
|
.. js:function:: core.del_acl(name, key)
|
||||||
|
|
||||||
**LEGACY**
|
|
||||||
|
|
||||||
**context**: init, task, action, sample-fetch, converter
|
**context**: init, task, action, sample-fetch, converter
|
||||||
|
|
||||||
Delete the ACL entry referenced by the key *key* in the list of ACLs
|
Delete the ACL entry referenced by the key *key* in the list of ACLs
|
||||||
@ -385,14 +367,8 @@ Core class
|
|||||||
:param string name: the name that reference the ACL entries.
|
:param string name: the name that reference the ACL entries.
|
||||||
:param string key: the key which will be deleted.
|
:param string key: the key which will be deleted.
|
||||||
|
|
||||||
.. Note::
|
|
||||||
This function is not optimal due to systematic Map reference lookup.
|
|
||||||
It is recommended to use :js:func:`Patref.del()` instead.
|
|
||||||
|
|
||||||
.. js:function:: core.del_map(name, key)
|
.. js:function:: core.del_map(name, key)
|
||||||
|
|
||||||
**LEGACY**
|
|
||||||
|
|
||||||
**context**: init, task, action, sample-fetch, converter
|
**context**: init, task, action, sample-fetch, converter
|
||||||
|
|
||||||
Delete the map entry indexed with the specified key in the list of maps
|
Delete the map entry indexed with the specified key in the list of maps
|
||||||
@ -401,10 +377,6 @@ Core class
|
|||||||
:param string name: the name that reference the map entries.
|
:param string name: the name that reference the map entries.
|
||||||
:param string key: the key which will be deleted.
|
:param string key: the key which will be deleted.
|
||||||
|
|
||||||
.. Note::
|
|
||||||
This function is not optimal due to systematic Map reference lookup.
|
|
||||||
It is recommended to use :js:func:`Patref.del()` instead.
|
|
||||||
|
|
||||||
.. js:function:: core.get_info()
|
.. js:function:: core.get_info()
|
||||||
|
|
||||||
**context**: body, init, task, action, sample-fetch, converter
|
**context**: body, init, task, action, sample-fetch, converter
|
||||||
@ -513,7 +485,7 @@ Core class
|
|||||||
|
|
||||||
.. js:function:: core.msleep(milliseconds)
|
.. js:function:: core.msleep(milliseconds)
|
||||||
|
|
||||||
**context**: task, action
|
**context**: body, init, task, action
|
||||||
|
|
||||||
The `core.msleep()` stops the Lua execution between specified milliseconds.
|
The `core.msleep()` stops the Lua execution between specified milliseconds.
|
||||||
|
|
||||||
@ -858,8 +830,6 @@ Core class
|
|||||||
|
|
||||||
.. js:function:: core.set_map(name, key, value)
|
.. js:function:: core.set_map(name, key, value)
|
||||||
|
|
||||||
**LEGACY**
|
|
||||||
|
|
||||||
**context**: init, task, action, sample-fetch, converter
|
**context**: init, task, action, sample-fetch, converter
|
||||||
|
|
||||||
Set the value *value* associated to the key *key* in the map referenced by
|
Set the value *value* associated to the key *key* in the map referenced by
|
||||||
@ -869,13 +839,9 @@ Core class
|
|||||||
:param string key: the key to set or replace
|
:param string key: the key to set or replace
|
||||||
:param string value: the associated value
|
:param string value: the associated value
|
||||||
|
|
||||||
.. Note::
|
|
||||||
This function is not optimal due to systematic Map reference lookup.
|
|
||||||
It is recommended to use :js:func:`Patref.set()` instead.
|
|
||||||
|
|
||||||
.. js:function:: core.sleep(int seconds)
|
.. js:function:: core.sleep(int seconds)
|
||||||
|
|
||||||
**context**: task, action
|
**context**: body, init, task, action
|
||||||
|
|
||||||
The `core.sleep()` functions stop the Lua execution between specified seconds.
|
The `core.sleep()` functions stop the Lua execution between specified seconds.
|
||||||
|
|
||||||
@ -893,9 +859,7 @@ Core class
|
|||||||
|
|
||||||
**context**: init, task, action
|
**context**: init, task, action
|
||||||
|
|
||||||
This function returns a new object of a *httpclient* class. An *httpclient*
|
This function returns a new object of a *httpclient* class.
|
||||||
object must be used to process one and only one request. It must never be
|
|
||||||
reused to process several requests.
|
|
||||||
|
|
||||||
:returns: A :ref:`httpclient_class` object.
|
:returns: A :ref:`httpclient_class` object.
|
||||||
|
|
||||||
@ -928,25 +892,12 @@ Core class
|
|||||||
its work and wants to give back the control to HAProxy without executing the
|
its work and wants to give back the control to HAProxy without executing the
|
||||||
remaining code. It can be seen as a multi-level "return".
|
remaining code. It can be seen as a multi-level "return".
|
||||||
|
|
||||||
.. js:function:: core.wait([milliseconds])
|
|
||||||
|
|
||||||
**context**: task, action
|
|
||||||
|
|
||||||
Give back the hand at the HAProxy scheduler. Unlike :js:func:`core.yield`
|
|
||||||
the task will not be woken up automatically to resume as fast as possible.
|
|
||||||
Instead, it will wait for an event to wake the task. If milliseconds argument
|
|
||||||
is provided then the Lua execution will be automatically resumed passed this
|
|
||||||
delay even if no event caused the task to wake itself up.
|
|
||||||
|
|
||||||
:param integer milliseconds: automatic wakeup passed this delay. (optional)
|
|
||||||
|
|
||||||
.. js:function:: core.yield()
|
.. js:function:: core.yield()
|
||||||
|
|
||||||
**context**: task, action
|
**context**: task, action, sample-fetch, converter
|
||||||
|
|
||||||
Give back the hand at the HAProxy scheduler. It is used when the LUA
|
Give back the hand at the HAProxy scheduler. It is used when the LUA
|
||||||
processing consumes a lot of processing time. Lua execution will be resumed
|
processing consumes a lot of processing time.
|
||||||
automatically (automatic reschedule).
|
|
||||||
|
|
||||||
.. js:function:: core.parse_addr(address)
|
.. js:function:: core.parse_addr(address)
|
||||||
|
|
||||||
@ -1089,13 +1040,18 @@ Core class
|
|||||||
perform the heavy job in a dedicated task and allow remaining events to be
|
perform the heavy job in a dedicated task and allow remaining events to be
|
||||||
processed more quickly.
|
processed more quickly.
|
||||||
|
|
||||||
.. js:function:: core.use_native_mailers_config()
|
.. js:function:: core.disable_legacy_mailers()
|
||||||
|
|
||||||
**context**: body
|
**LEGACY**
|
||||||
|
|
||||||
Inform haproxy that the script will make use of the native "mailers"
|
**context**: body, init
|
||||||
config section (although legacy). In other words, inform haproxy that
|
|
||||||
:js:func:`Proxy.get_mailers()` will be used later in the program.
|
Disable the sending of email alerts through the legacy email sending
|
||||||
|
function when mailers are used in the configuration.
|
||||||
|
|
||||||
|
Use this when sending email alerts directly from lua.
|
||||||
|
|
||||||
|
:see: :js:func:`Proxy.get_mailers()`
|
||||||
|
|
||||||
.. _proxy_class:
|
.. _proxy_class:
|
||||||
|
|
||||||
@ -1224,14 +1180,8 @@ Proxy class
|
|||||||
|
|
||||||
**LEGACY**
|
**LEGACY**
|
||||||
|
|
||||||
Returns a table containing legacy mailers config (from haproxy configuration
|
Returns a table containing mailers config for the current proxy or nil
|
||||||
file) for the current proxy or nil if mailers are not available for the proxy.
|
if mailers are not available for the proxy.
|
||||||
|
|
||||||
.. warning::
|
|
||||||
When relying on :js:func:`Proxy.get_mailers()` to retrieve mailers
|
|
||||||
configuration, :js:func:`core.use_native_mailers_config()` must be called
|
|
||||||
first from body or init context to inform haproxy that Lua makes use of the
|
|
||||||
legacy mailers config.
|
|
||||||
|
|
||||||
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
|
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
|
||||||
proxy.
|
proxy.
|
||||||
@ -1248,6 +1198,10 @@ ProxyMailers class
|
|||||||
|
|
||||||
This class provides mailers config for a given proxy.
|
This class provides mailers config for a given proxy.
|
||||||
|
|
||||||
|
If sending emails directly from lua, please consider
|
||||||
|
:js:func:`core.disable_legacy_mailers()` to disable the email sending from
|
||||||
|
haproxy. (Or email alerts will be sent twice...)
|
||||||
|
|
||||||
.. js:attribute:: ProxyMailers.track_server_health
|
.. js:attribute:: ProxyMailers.track_server_health
|
||||||
|
|
||||||
Boolean set to true if the option "log-health-checks" is configured on
|
Boolean set to true if the option "log-health-checks" is configured on
|
||||||
@ -1881,17 +1835,6 @@ Queue class
|
|||||||
|
|
||||||
Use :js:func:`core.queue` to get a new Queue object.
|
Use :js:func:`core.queue` to get a new Queue object.
|
||||||
|
|
||||||
.. js:function:: Queue.alarm()
|
|
||||||
|
|
||||||
**context**: task, action, service
|
|
||||||
|
|
||||||
Sets a wakeup alarm on the current Lua context so that when new data
|
|
||||||
becomes available on the Queue, the current Lua context is woken up
|
|
||||||
automatically. It can be combined with :js:func:`core.wait` to wait
|
|
||||||
for Queue events.
|
|
||||||
|
|
||||||
:param class_queue queue: A :ref:`queue_class` to the current queue
|
|
||||||
|
|
||||||
.. js:function:: Queue.size(queue)
|
.. js:function:: Queue.size(queue)
|
||||||
|
|
||||||
This function returns the number of items within the Queue.
|
This function returns the number of items within the Queue.
|
||||||
@ -2580,9 +2523,7 @@ HTTPClient class
|
|||||||
.. js:class:: HTTPClient
|
.. js:class:: HTTPClient
|
||||||
|
|
||||||
The httpclient class allows issue of outbound HTTP requests through a simple
|
The httpclient class allows issue of outbound HTTP requests through a simple
|
||||||
API without the knowledge of HAProxy internals. Any instance must be used to
|
API without the knowledge of HAProxy internals.
|
||||||
process one and only one request. It must never be reused to process several
|
|
||||||
requests.
|
|
||||||
|
|
||||||
.. js:function:: HTTPClient.get(httpclient, request)
|
.. js:function:: HTTPClient.get(httpclient, request)
|
||||||
.. js:function:: HTTPClient.head(httpclient, request)
|
.. js:function:: HTTPClient.head(httpclient, request)
|
||||||
@ -3471,178 +3412,6 @@ Map class
|
|||||||
:param string str: Is the string used as key.
|
:param string str: Is the string used as key.
|
||||||
:returns: a string containing the result or empty string if no match.
|
:returns: a string containing the result or empty string if no match.
|
||||||
|
|
||||||
.. _patref_class:
|
|
||||||
|
|
||||||
Patref class
|
|
||||||
=================
|
|
||||||
|
|
||||||
.. js:class:: Patref
|
|
||||||
|
|
||||||
Patref object corresponds to the internal HAProxy pat_ref element which
|
|
||||||
is used to store ACL and MAP elements. It is identified by its name
|
|
||||||
(reference) which often is a filename, unless it is prefixed by 'virt@'
|
|
||||||
for virtual references or 'opt@' for references that don't necessarily
|
|
||||||
point to real file. From Lua, :ref:`patref_class` object may be used to
|
|
||||||
directly manipulate existing pattern reference storage. For convenience,
|
|
||||||
Patref objects may be directly accessed and listed as a table thanks to
|
|
||||||
index and pairs metamethods. Note however that for the index metamethod,
|
|
||||||
in case of duplicated entries, only the first matching entry is returned.
|
|
||||||
|
|
||||||
.. Warning::
|
|
||||||
Not meant to be shared between multiple contexts. If multiple contexts
|
|
||||||
need to work on the same pattern reference, each context should have
|
|
||||||
its own patref object.
|
|
||||||
|
|
||||||
Patref object is obtained using the :js:func:`core.get_patref()`
|
|
||||||
function
|
|
||||||
|
|
||||||
.. js:function:: Patref.get_name(ref)
|
|
||||||
|
|
||||||
:returns: the name of the pattern reference object.
|
|
||||||
|
|
||||||
.. js:function:: Patref.is_map(ref)
|
|
||||||
|
|
||||||
:returns: true if the pattern reference is used to handle maps instead
|
|
||||||
of acl, false otherwise.
|
|
||||||
|
|
||||||
.. js:function:: Patref.purge(ref)
|
|
||||||
|
|
||||||
Completely prune all pattern reference entries pointed to by Patref object.
|
|
||||||
This special operation doesn't require committing.
|
|
||||||
|
|
||||||
.. js:function:: Patref.prepare(ref)
|
|
||||||
|
|
||||||
Create a new empty version for Patref Object. It can be used to manipulate
|
|
||||||
the Patref object with update methods without applying the updates until the
|
|
||||||
commit() method is called.
|
|
||||||
|
|
||||||
.. js:function:: Patref.commit(ref)
|
|
||||||
|
|
||||||
Tries to commit pending Patref object updates, that is updates made to the
|
|
||||||
local object will be committed to the underlying pattern reference storage
|
|
||||||
in an atomic manner upon success. Upon failure, local pending updates are
|
|
||||||
lost. Upon success, all other pending updates on the pattern reference
|
|
||||||
(e.g.: "prepare" from the cli or from other Patref Lua objects) started
|
|
||||||
before the new one will be pruned.
|
|
||||||
|
|
||||||
:returns: true on success and nil on failure (followed by an error message).
|
|
||||||
|
|
||||||
See :js:func:`Patref.prepare()` and :js:func:`Patref.giveup()`
|
|
||||||
|
|
||||||
.. js:function:: Patref.giveup(ref)
|
|
||||||
|
|
||||||
Drop the pending patref version created using Patref:prepare(): get back to
|
|
||||||
live dataset.
|
|
||||||
|
|
||||||
.. js:function:: Patref.add(ref, key[, value])
|
|
||||||
|
|
||||||
Add a new key to the pattern reference, with associated value for maps.
|
|
||||||
|
|
||||||
:param string key: the string used as a key
|
|
||||||
:param string value: the string used as value to be associated with the key
|
|
||||||
(only relevant for maps)
|
|
||||||
:returns: true on success and nil on failure (followed by an error message).
|
|
||||||
|
|
||||||
.. Note::
|
|
||||||
Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
|
|
||||||
was called and is still ongoing (waiting for commit or giveup)
|
|
||||||
|
|
||||||
.. js:function:: patref.add_bulk(ref, table)
|
|
||||||
|
|
||||||
Adds multiple entries at once to the Pattern reference. It is recommended
|
|
||||||
to use this one over :js:func:`Patref.prepare()` to add a lot of entries
|
|
||||||
at once because this one is more efficient.
|
|
||||||
|
|
||||||
:param table table: For ACL, a table of keys strings: t[0] = "key1",
|
|
||||||
t[1] = "key2"...
|
|
||||||
|
|
||||||
For Maps, a table of key:value string pairs: t["key"] = "value"
|
|
||||||
:returns: true on success and nil on failure (followed by an error message).
|
|
||||||
|
|
||||||
.. Note::
|
|
||||||
Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
|
|
||||||
was called and is still pending (waiting for commit or giveup)
|
|
||||||
|
|
||||||
.. js:function:: Patref.del(ref, key)
|
|
||||||
|
|
||||||
Delete all entries matching the input key in the pattern reference. In
|
|
||||||
case of duplicate keys, all keys are removed.
|
|
||||||
|
|
||||||
:param string key: the string used as a key
|
|
||||||
:returns: true on success and false on failure.
|
|
||||||
|
|
||||||
.. Note::
|
|
||||||
Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
|
|
||||||
was called and is still ongoing (waiting for commit or giveup)
|
|
||||||
|
|
||||||
.. js:function:: Patref.set(ref, key, value[, force])
|
|
||||||
|
|
||||||
Only relevant for maps. Set existing entries matching key to the provided
|
|
||||||
value. In case of duplicate keys, all matching keys will be set to the new
|
|
||||||
value.
|
|
||||||
|
|
||||||
:param string key: the string used as a key
|
|
||||||
:param string value: the string used as value
|
|
||||||
:param boolean force: create the entry if it doesn't exist (optional,
|
|
||||||
defaults to false)
|
|
||||||
:returns: true on success and nil on failure (followed by an error message)
|
|
||||||
|
|
||||||
.. Note::
|
|
||||||
Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
|
|
||||||
was called and is still ongoing (waiting for commit or giveup)
|
|
||||||
|
|
||||||
.. js:function:: Patref.event_sub(ref, event_types, func)
|
|
||||||
|
|
||||||
Register a function that will be called on specific PAT_REF events.
|
|
||||||
See :js:func:`core.event_sub()` for generalities. Please note however that
|
|
||||||
for performance reasons pattern reference events can only be subscribed
|
|
||||||
per pattern reference (not globally). What this means is that the provided
|
|
||||||
callback function will only be called for events affecting the pattern
|
|
||||||
reference pointed by the Patref object (ref) passed as parameter.
|
|
||||||
|
|
||||||
If you want to be notified for events on a given set of pattern references, it
|
|
||||||
is still possible to perform as many per-patref subscriptions as needed.
|
|
||||||
|
|
||||||
Also, for PAT_REF events, no event data is provided (known as "event_data" in
|
|
||||||
callback function's prototype from :js:func:`core.event_sub()`)
|
|
||||||
|
|
||||||
The list of the available event types for the PAT_REF family are:
|
|
||||||
|
|
||||||
* **PAT_REF_ADD**: element was added to the current version of the pattern
|
|
||||||
reference
|
|
||||||
* **PAT_REF_DEL**: element was deleted from the current version of the
|
|
||||||
pattern reference
|
|
||||||
* **PAT_REF_SET**: element was modified in the current version of the
|
|
||||||
pattern reference
|
|
||||||
* **PAT_REF_CLEAR**: all elements were cleared from the current version of
|
|
||||||
the pattern reference
|
|
||||||
* **PAT_REF_COMMIT**: pending element(s) was/were committed in the current
|
|
||||||
version of the pattern reference
|
|
||||||
|
|
||||||
.. Note::
|
|
||||||
Use **PAT_REF** in **event_types** to subscribe to all pattern reference
|
|
||||||
events types at once.
|
|
||||||
|
|
||||||
Here is a working example showing how to trigger a callback function for the
|
|
||||||
pattern reference associated to file "test.map":
|
|
||||||
|
|
||||||
.. code-block:: lua
|
|
||||||
|
|
||||||
core.register_init(function()
|
|
||||||
-- We assume that "test.map" is a map file referenced in haproxy config
|
|
||||||
-- file, thus it is loaded during config parsing and is expected to be
|
|
||||||
-- available at init Lua stage. Indeed, the below code wouldn't work if
|
|
||||||
-- used directly within body context, as at that time the config is not
|
|
||||||
-- fully parsed.
|
|
||||||
local map_patref = core.get_patref("test.map")
|
|
||||||
map_patref:event_sub({"PAT_REF_ADD"}, function(event, data, sub)
|
|
||||||
-- in the patref event handler
|
|
||||||
print("entry added!")
|
|
||||||
end)
|
|
||||||
end)
|
|
||||||
|
|
||||||
..
|
|
||||||
|
|
||||||
.. _applethttp_class:
|
.. _applethttp_class:
|
||||||
|
|
||||||
AppletHTTP class
|
AppletHTTP class
|
||||||
@ -3911,31 +3680,16 @@ AppletTCP class
|
|||||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
||||||
:returns: a string. The string can be empty if we reach the end of the stream.
|
:returns: a string. The string can be empty if we reach the end of the stream.
|
||||||
|
|
||||||
.. js:function:: AppletTCP.receive(applet, [size, [timeout]])
|
.. js:function:: AppletTCP.receive(applet, [size])
|
||||||
|
|
||||||
Reads data from the TCP stream, according to the specified read *size*. If the
|
Reads data from the TCP stream, according to the specified read *size*. If the
|
||||||
*size* is missing, the function tries to read all the content of the stream
|
*size* is missing, the function tries to read all the content of the stream
|
||||||
until the end. An optional timeout may be specified in milliseconds. In this
|
until the end.
|
||||||
case the function will return no longer than this delay, with the amount of
|
|
||||||
available data, or nil if there is no data. An empty string is returned if the
|
|
||||||
connection is closed.
|
|
||||||
|
|
||||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
||||||
:param integer size: the required read size.
|
:param integer size: the required read size.
|
||||||
:returns: return nil if the timeout has expired and no data was available but
|
:returns: always return a string, the string can be empty if the connection is
|
||||||
can still be received. Otherwise, a string is returned, possibly an empty
|
closed.
|
||||||
string if the connection is closed.
|
|
||||||
|
|
||||||
.. js:function:: AppletTCP.try_receive(applet)
|
|
||||||
|
|
||||||
Reads available data from the TCP stream and returns immediately. Returns a
|
|
||||||
string containing read bytes or nil if no bytes are available at that time. An
|
|
||||||
empty string is returned if the connection is closed.
|
|
||||||
|
|
||||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
|
||||||
:returns: return nil if no data was available but can still be
|
|
||||||
received. Otherwise, a string is returned, possibly an empty string if the
|
|
||||||
connection is closed.
|
|
||||||
|
|
||||||
.. js:function:: AppletTCP.send(appletmsg)
|
.. js:function:: AppletTCP.send(appletmsg)
|
||||||
|
|
||||||
@ -4612,27 +4366,6 @@ HTTPMessage class
|
|||||||
data by default.
|
data by default.
|
||||||
:returns: an integer containing the amount of bytes copied or -1.
|
:returns: an integer containing the amount of bytes copied or -1.
|
||||||
|
|
||||||
.. js:function:: HTTPMessage.set_body_len(http_msg, length)
|
|
||||||
|
|
||||||
This function changes the expected payload length of the HTTP message
|
|
||||||
**http_msg**. **length** can be an integer value. In that case, a
|
|
||||||
"Content-Length" header is added with the given value. It is also possible to
|
|
||||||
pass the **"chunked"** string instead of an integer value to force the HTTP
|
|
||||||
message to be chunk-encoded. In that case, a "Transfer-Encoding" header is
|
|
||||||
added with the "chunked" value. In both cases, all existing "Content-Length"
|
|
||||||
and "Transfer-Encoding" headers are removed.
|
|
||||||
|
|
||||||
This function should be used in the filter context to be able to alter the
|
|
||||||
payload of the HTTP message. The internal state of the HTTP message is updated
|
|
||||||
accordingly. :js:func:`HTTPMessage.add_header()` or
|
|
||||||
:js:func:`HTTPMessage.set_header()` functions must be used in that case.
|
|
||||||
|
|
||||||
:param class_httpmessage http_msg: The manipulated HTTP message.
|
|
||||||
:param type length: The new payload length to set. It can be an integer or
|
|
||||||
the string "chunked".
|
|
||||||
:returns: true if the payload length was successfully updated, false
|
|
||||||
otherwise.
|
|
||||||
|
|
||||||
.. js:function:: HTTPMessage.set_eom(http_msg)
|
.. js:function:: HTTPMessage.set_eom(http_msg)
|
||||||
|
|
||||||
This function set the end of message for the HTTP message **http_msg**.
|
This function set the end of message for the HTTP message **http_msg**.
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -28,9 +28,7 @@ Revision history
|
|||||||
string encoding. With contributions from Andriy Palamarchuk
|
string encoding. With contributions from Andriy Palamarchuk
|
||||||
(Amazon.com).
|
(Amazon.com).
|
||||||
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
|
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
|
||||||
2025/09/09 - added SSL-related TLVs for key exchange group and signature
|
|
||||||
scheme (Steven Collison)
|
|
||||||
2026/01/15 - added SSL client certificate TLV (Simon Ser)
|
|
||||||
|
|
||||||
1. Background
|
1. Background
|
||||||
|
|
||||||
@ -548,9 +546,6 @@ The following types have already been registered for the <type> field :
|
|||||||
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
||||||
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
||||||
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
||||||
#define PP2_SUBTYPE_SSL_GROUP 0x26
|
|
||||||
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
|
|
||||||
#define PP2_SUBTYPE_SSL_CLIENT_CERT 0x28
|
|
||||||
#define PP2_TYPE_NETNS 0x30
|
#define PP2_TYPE_NETNS 0x30
|
||||||
|
|
||||||
|
|
||||||
@ -627,10 +622,7 @@ For the type PP2_TYPE_SSL, the value is itself a defined like this :
|
|||||||
uint8_t client;
|
uint8_t client;
|
||||||
uint32_t verify;
|
uint32_t verify;
|
||||||
struct pp2_tlv sub_tlv[0];
|
struct pp2_tlv sub_tlv[0];
|
||||||
} __attribute__((packed));
|
};
|
||||||
|
|
||||||
Note the "packed" attribute which indicates that each field starts immediately
|
|
||||||
after the previous one (i.e. without type-specific alignment nor padding).
|
|
||||||
|
|
||||||
The <verify> field will be zero if the client presented a certificate
|
The <verify> field will be zero if the client presented a certificate
|
||||||
and it was successfully verified, and non-zero otherwise.
|
and it was successfully verified, and non-zero otherwise.
|
||||||
@ -662,25 +654,13 @@ of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
|
|||||||
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
|
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
|
||||||
of the algorithm used to sign the certificate presented by the frontend when
|
of the algorithm used to sign the certificate presented by the frontend when
|
||||||
the incoming connection was made over an SSL/TLS transport layer, for example
|
the incoming connection was made over an SSL/TLS transport layer, for example
|
||||||
"RSA-SHA256".
|
"SHA256".
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
|
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
|
||||||
of the algorithm used to generate the key of the certificate presented by the
|
of the algorithm used to generate the key of the certificate presented by the
|
||||||
frontend when the incoming connection was made over an SSL/TLS transport layer,
|
frontend when the incoming connection was made over an SSL/TLS transport layer,
|
||||||
for example "RSA2048".
|
for example "RSA2048".
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_GROUP provides the US-ASCII string name of
|
|
||||||
the key exchange algorithm used for the frontend TLS connection, for example
|
|
||||||
"secp256r1".
|
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
|
|
||||||
name of the algorithm the frontend used to sign the ServerKeyExchange or
|
|
||||||
CertificateVerify message, for example "rsa_pss_rsae_sha256".
|
|
||||||
|
|
||||||
The optional second level TLV PP2_SUBTYPE_SSL_CLIENT_CERT provides the raw
|
|
||||||
X.509 client certificate encoded in ASN.1 DER. The frontend may choose to omit
|
|
||||||
this TLV depending on configuration.
|
|
||||||
|
|
||||||
In all cases, the string representation (in UTF8) of the Common Name field
|
In all cases, the string representation (in UTF8) of the Common Name field
|
||||||
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
|
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
|
||||||
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
|
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
|
||||||
|
|||||||
@ -24,7 +24,7 @@ vtest installation
|
|||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
To use vtest you will have to download and compile the recent vtest
|
To use vtest you will have to download and compile the recent vtest
|
||||||
sources found at https://github.com/vtest/VTest2.
|
sources found at https://github.com/vtest/VTest.
|
||||||
|
|
||||||
To compile vtest:
|
To compile vtest:
|
||||||
|
|
||||||
|
|||||||
@ -1,14 +0,0 @@
|
|||||||
global
|
|
||||||
default-path config
|
|
||||||
tune.lua.bool-sample-conversion normal
|
|
||||||
# load all games here
|
|
||||||
lua-load lua/trisdemo.lua
|
|
||||||
|
|
||||||
defaults
|
|
||||||
timeout client 1h
|
|
||||||
|
|
||||||
# map one TCP port to each game
|
|
||||||
.notice 'use "socat TCP-CONNECT:0:7001 STDIO,raw,echo=0" to start playing'
|
|
||||||
frontend trisdemo
|
|
||||||
bind :7001
|
|
||||||
tcp-request content use-service lua.trisdemo
|
|
||||||
@ -1,69 +0,0 @@
|
|||||||
# Example: log HTTP traffic and TLS session keys to separate destinations
|
|
||||||
#
|
|
||||||
# "option httpslog" sends HTTP access logs to the /dev/log syslog server.
|
|
||||||
# TLS session keys are written to 2 ring buffers.
|
|
||||||
#
|
|
||||||
# Requirements:
|
|
||||||
# - HAProxy built with OpenSSL support
|
|
||||||
# - "tune.ssl.keylog on" in the global section
|
|
||||||
#
|
|
||||||
# Retrieve TLS session keys from the ring buffer via the CLI:
|
|
||||||
# For frontend connections:
|
|
||||||
#
|
|
||||||
# (echo "show events keylog-fc -w"; read) | socat /tmp/worker.socket -
|
|
||||||
#
|
|
||||||
# For backend connections:
|
|
||||||
#
|
|
||||||
# (echo "show events keylog-bc -w"; read) | socat /tmp/worker.socket -
|
|
||||||
#
|
|
||||||
# The result is in SSLKEYLOGFILE format and can be saved to a file and loaded
|
|
||||||
# into Wireshark to decrypt captured TLS traffic.
|
|
||||||
|
|
||||||
global
|
|
||||||
stats socket /tmp/worker.socket mode 0660
|
|
||||||
tune.ssl.keylog on
|
|
||||||
|
|
||||||
# Ring buffer for TLS session keys.
|
|
||||||
# "format raw" stores only the log message text, without any syslog envelope,
|
|
||||||
# producing output in the SSLKEYLOGFILE format directly.
|
|
||||||
ring keylog-fc
|
|
||||||
description "TLS session key frontend log"
|
|
||||||
format raw
|
|
||||||
maxlen 2000
|
|
||||||
size 1M
|
|
||||||
|
|
||||||
ring keylog-bc
|
|
||||||
description "TLS session key backend log"
|
|
||||||
format raw
|
|
||||||
maxlen 2000
|
|
||||||
size 1M
|
|
||||||
|
|
||||||
|
|
||||||
defaults
|
|
||||||
mode http
|
|
||||||
timeout client 30s
|
|
||||||
timeout server 30s
|
|
||||||
timeout connect 5s
|
|
||||||
|
|
||||||
log-profile keylog-fc
|
|
||||||
on any format "${HAPROXY_KEYLOG_FC_LOG_FMT}"
|
|
||||||
|
|
||||||
log-profile keylog-bc
|
|
||||||
on any format "${HAPROXY_KEYLOG_BC_LOG_FMT}"
|
|
||||||
|
|
||||||
frontend https-in
|
|
||||||
bind :443 ssl crt "common.pem"
|
|
||||||
|
|
||||||
option httpslog
|
|
||||||
|
|
||||||
# HTTPs access logs sent to the syslog server
|
|
||||||
log /dev/log format raw local0
|
|
||||||
|
|
||||||
# TLS session keys written to the ring buffer
|
|
||||||
log ring@keylog-fc profile keylog-fc local1
|
|
||||||
log ring@keylog-bc profile keylog-bc local1
|
|
||||||
|
|
||||||
default_backend be1
|
|
||||||
|
|
||||||
backend be1
|
|
||||||
server s1 10.0.0.123:443 ssl verify none
|
|
||||||
@ -3,7 +3,7 @@
|
|||||||
-- Provides a pure lua alternative to tcpcheck mailers.
|
-- Provides a pure lua alternative to tcpcheck mailers.
|
||||||
--
|
--
|
||||||
-- To be loaded using "lua-load" from haproxy configuration to handle
|
-- To be loaded using "lua-load" from haproxy configuration to handle
|
||||||
-- email-alerts directly from lua
|
-- email-alerts directly from lua and disable legacy tcpcheck implementation.
|
||||||
|
|
||||||
local SYSLOG_LEVEL = {
|
local SYSLOG_LEVEL = {
|
||||||
["EMERG"] = 0,
|
["EMERG"] = 0,
|
||||||
@ -364,9 +364,9 @@ local function srv_event_add(event, data)
|
|||||||
mailers_track_server_events(data.reference)
|
mailers_track_server_events(data.reference)
|
||||||
end
|
end
|
||||||
|
|
||||||
-- tell haproxy that we do use the legacy native "mailers" config section
|
|
||||||
-- which allows us to retrieve mailers configuration using Proxy:get_mailers()
|
-- disable legacy email-alerts since email-alerts will be sent from lua directly
|
||||||
core.use_native_mailers_config()
|
core.disable_legacy_mailers()
|
||||||
|
|
||||||
-- event subscriptions are purposely performed in an init function to prevent
|
-- event subscriptions are purposely performed in an init function to prevent
|
||||||
-- email alerts from being generated too early (when process is starting up)
|
-- email alerts from being generated too early (when process is starting up)
|
||||||
|
|||||||
@ -1,251 +0,0 @@
|
|||||||
-- Example game of falling pieces for HAProxy CLI/Applet
|
|
||||||
local board_width = 10
|
|
||||||
local board_height = 20
|
|
||||||
local game_name = "Lua Tris Demo"
|
|
||||||
|
|
||||||
-- Shapes with IDs for color mapping
|
|
||||||
local pieces = {
|
|
||||||
{id = 1, shape = {{1,1,1,1}}}, -- I (Cyan)
|
|
||||||
{id = 2, shape = {{1,1},{1,1}}}, -- O (Yellow)
|
|
||||||
{id = 3, shape = {{0,1,0},{1,1,1}}}, -- T (Purple)
|
|
||||||
{id = 4, shape = {{0,1,1},{1,1,0}}}, -- S (Green)
|
|
||||||
{id = 5, shape = {{1,1,0},{0,1,1}}}, -- Z (Red)
|
|
||||||
{id = 6, shape = {{1,0,0},{1,1,1}}}, -- J (Blue)
|
|
||||||
{id = 7, shape = {{0,0,1},{1,1,1}}} -- L (Orange)
|
|
||||||
}
|
|
||||||
|
|
||||||
-- ANSI escape codes
|
|
||||||
local clear_screen = "\27[2J"
|
|
||||||
local cursor_home = "\27[H"
|
|
||||||
local cursor_hide = "\27[?25l"
|
|
||||||
local cursor_show = "\27[?25h"
|
|
||||||
local reset_color = "\27[0m"
|
|
||||||
|
|
||||||
local color_codes = {
|
|
||||||
[1] = "\27[1;36m", -- I: Cyan
|
|
||||||
[2] = "\27[1;37m", -- O: White
|
|
||||||
[3] = "\27[1;35m", -- T: Purple
|
|
||||||
[4] = "\27[1;32m", -- S: Green
|
|
||||||
[5] = "\27[1;31m", -- Z: Red
|
|
||||||
[6] = "\27[1;34m", -- J: Blue
|
|
||||||
[7] = "\27[1;33m" -- L: Yellow
|
|
||||||
}
|
|
||||||
|
|
||||||
local function init_board()
|
|
||||||
local board = {}
|
|
||||||
for y = 1, board_height do
|
|
||||||
board[y] = {}
|
|
||||||
for x = 1, board_width do
|
|
||||||
board[y][x] = 0 -- 0 for empty, piece ID for placed blocks
|
|
||||||
end
|
|
||||||
end
|
|
||||||
return board
|
|
||||||
end
|
|
||||||
|
|
||||||
local function can_place_piece(board, piece, px, py)
|
|
||||||
for y = 1, #piece do
|
|
||||||
for x = 1, #piece[1] do
|
|
||||||
if piece[y][x] == 1 then
|
|
||||||
local board_x = px + x - 1
|
|
||||||
local board_y = py + y - 1
|
|
||||||
if board_x < 1 or board_x > board_width or board_y > board_height or
|
|
||||||
(board_y >= 1 and board[board_y][board_x] ~= 0) then
|
|
||||||
return false
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
return true
|
|
||||||
end
|
|
||||||
|
|
||||||
local function place_piece(board, piece, piece_id, px, py)
|
|
||||||
for y = 1, #piece do
|
|
||||||
for x = 1, #piece[1] do
|
|
||||||
if piece[y][x] == 1 then
|
|
||||||
local board_x = px + x - 1
|
|
||||||
local board_y = py + y - 1
|
|
||||||
if board_y >= 1 and board_y <= board_height then
|
|
||||||
board[board_y][board_x] = piece_id -- Store piece ID for color
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
local function clear_lines(board)
|
|
||||||
local lines_cleared = 0
|
|
||||||
local y = board_height
|
|
||||||
while y >= 1 do
|
|
||||||
local full = true
|
|
||||||
for x = 1, board_width do
|
|
||||||
if board[y][x] == 0 then
|
|
||||||
full = false
|
|
||||||
break
|
|
||||||
end
|
|
||||||
end
|
|
||||||
if full then
|
|
||||||
table.remove(board, y)
|
|
||||||
table.insert(board, 1, {})
|
|
||||||
for x = 1, board_width do
|
|
||||||
board[1][x] = 0
|
|
||||||
end
|
|
||||||
lines_cleared = lines_cleared + 1
|
|
||||||
else
|
|
||||||
y = y - 1
|
|
||||||
end
|
|
||||||
end
|
|
||||||
return lines_cleared
|
|
||||||
end
|
|
||||||
|
|
||||||
local function rotate_piece(piece, piece_id, px, py, board)
|
|
||||||
local new_piece = {}
|
|
||||||
for x = 1, #piece[1] do
|
|
||||||
new_piece[x] = {}
|
|
||||||
for y = 1, #piece do
|
|
||||||
new_piece[x][#piece + 1 - y] = piece[y][x]
|
|
||||||
end
|
|
||||||
end
|
|
||||||
if can_place_piece(board, new_piece, px, py) then
|
|
||||||
return new_piece
|
|
||||||
end
|
|
||||||
return piece
|
|
||||||
end
|
|
||||||
|
|
||||||
function render(applet, board, piece, piece_id, px, py, score)
|
|
||||||
local output = cursor_home
|
|
||||||
output = output .. game_name .. " - Lines: " .. score .. "\r\n"
|
|
||||||
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
|
|
||||||
for y = 1, board_height do
|
|
||||||
output = output .. "|"
|
|
||||||
for x = 1, board_width do
|
|
||||||
local char = " "
|
|
||||||
-- Current piece
|
|
||||||
for py_idx = 1, #piece do
|
|
||||||
for px_idx = 1, #piece[1] do
|
|
||||||
if piece[py_idx][px_idx] == 1 then
|
|
||||||
local board_x = px + px_idx - 1
|
|
||||||
local board_y = py + py_idx - 1
|
|
||||||
if board_x == x and board_y == y then
|
|
||||||
char = color_codes[piece_id] .. "[]" .. reset_color
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
-- Placed blocks
|
|
||||||
if board[y][x] ~= 0 then
|
|
||||||
char = color_codes[board[y][x]] .. "[]" .. reset_color
|
|
||||||
end
|
|
||||||
output = output .. char
|
|
||||||
end
|
|
||||||
output = output .. "|\r\n"
|
|
||||||
end
|
|
||||||
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
|
|
||||||
output = output .. "Use arrow keys to move, Up to rotate, q to quit"
|
|
||||||
applet:send(output)
|
|
||||||
end
|
|
||||||
|
|
||||||
function handler(applet)
|
|
||||||
local board = init_board()
|
|
||||||
local piece_idx = math.random(#pieces)
|
|
||||||
local current_piece = pieces[piece_idx].shape
|
|
||||||
local piece_id = pieces[piece_idx].id
|
|
||||||
local piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
|
|
||||||
local piece_y = 1
|
|
||||||
local score = 0
|
|
||||||
local game_over = false
|
|
||||||
local delay = 500
|
|
||||||
|
|
||||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
|
||||||
game_over = true
|
|
||||||
end
|
|
||||||
|
|
||||||
applet:send(cursor_hide)
|
|
||||||
applet:send(clear_screen)
|
|
||||||
|
|
||||||
-- fall the piece by one line every delay
|
|
||||||
local function fall_piece()
|
|
||||||
while not game_over do
|
|
||||||
piece_y = piece_y + 1
|
|
||||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
|
||||||
piece_y = piece_y - 1
|
|
||||||
place_piece(board, current_piece, piece_id, piece_x, piece_y)
|
|
||||||
score = score + clear_lines(board)
|
|
||||||
piece_idx = math.random(#pieces)
|
|
||||||
current_piece = pieces[piece_idx].shape
|
|
||||||
piece_id = pieces[piece_idx].id
|
|
||||||
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
|
|
||||||
piece_y = 1
|
|
||||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
|
||||||
game_over = true
|
|
||||||
end
|
|
||||||
end
|
|
||||||
core.msleep(delay)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
core.register_task(fall_piece)
|
|
||||||
|
|
||||||
local function drop_piece()
|
|
||||||
while can_place_piece(board, current_piece, piece_x, piece_y) do
|
|
||||||
piece_y = piece_y + 1
|
|
||||||
end
|
|
||||||
piece_y = piece_y - 1
|
|
||||||
place_piece(board, current_piece, piece_id, piece_x, piece_y)
|
|
||||||
score = score + clear_lines(board)
|
|
||||||
piece_idx = math.random(#pieces)
|
|
||||||
current_piece = pieces[piece_idx].shape
|
|
||||||
piece_id = pieces[piece_idx].id
|
|
||||||
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
|
|
||||||
piece_y = 1
|
|
||||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
|
||||||
game_over = true
|
|
||||||
end
|
|
||||||
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
|
|
||||||
end
|
|
||||||
|
|
||||||
while not game_over do
|
|
||||||
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
|
|
||||||
|
|
||||||
-- update the delay based on the score: 500 for 0 lines to 100ms for 100 lines.
|
|
||||||
if score >= 100 then
|
|
||||||
delay = 100
|
|
||||||
else
|
|
||||||
delay = 500 - 4*score
|
|
||||||
end
|
|
||||||
|
|
||||||
local input = applet:receive(1, delay)
|
|
||||||
if input then
|
|
||||||
if input == "" or input == "q" then
|
|
||||||
game_over = true
|
|
||||||
elseif input == "\27" then
|
|
||||||
local a = applet:receive(1, delay)
|
|
||||||
if a == "[" then
|
|
||||||
local b = applet:receive(1, delay)
|
|
||||||
if b == "A" then -- Up arrow (rotate clockwise)
|
|
||||||
current_piece = rotate_piece(current_piece, piece_id, piece_x, piece_y, board)
|
|
||||||
elseif b == "B" then -- Down arrow (full drop)
|
|
||||||
drop_piece()
|
|
||||||
elseif b == "C" then -- Right arrow
|
|
||||||
piece_x = piece_x + 1
|
|
||||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
|
||||||
piece_x = piece_x - 1
|
|
||||||
end
|
|
||||||
elseif b == "D" then -- Left arrow
|
|
||||||
piece_x = piece_x - 1
|
|
||||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
|
||||||
piece_x = piece_x + 1
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
applet:send(clear_screen .. cursor_home .. "Game Over! Lines: " .. score .. "\r\n" .. cursor_show)
|
|
||||||
end
|
|
||||||
|
|
||||||
-- works as a TCP applet
|
|
||||||
core.register_service("trisdemo", "tcp", handler)
|
|
||||||
|
|
||||||
-- may also work on the CLI but requires an unbuffered handler
|
|
||||||
core.register_cli({"trisdemo"}, "Play a simple falling pieces game", handler)
|
|
||||||
@ -50,9 +50,6 @@ static inline int acl_pass(enum acl_test_res res)
|
|||||||
* NULL if not found.
|
* NULL if not found.
|
||||||
*/
|
*/
|
||||||
struct acl *find_acl_by_name(const char *name, struct list *head);
|
struct acl *find_acl_by_name(const char *name, struct list *head);
|
||||||
struct acl *find_acl_default(const char *acl_name, struct list *known_acl,
|
|
||||||
char **err, struct arg_list *al,
|
|
||||||
const char *file, int line);
|
|
||||||
|
|
||||||
/* Return a pointer to the ACL keyword <kw> within the list starting at <head>,
|
/* Return a pointer to the ACL keyword <kw> within the list starting at <head>,
|
||||||
* or NULL if not found. Note that if <kw> contains an opening parenthesis,
|
* or NULL if not found. Note that if <kw> contains an opening parenthesis,
|
||||||
@ -104,26 +101,6 @@ struct acl_cond *build_acl_cond(const char *file, int line, struct list *known_a
|
|||||||
*/
|
*/
|
||||||
enum acl_test_res acl_exec_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt);
|
enum acl_test_res acl_exec_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt);
|
||||||
|
|
||||||
|
|
||||||
/* helper that combines acl_exec_cond() and acl_pass(), and also takes into
|
|
||||||
* account cond->pol in order to return either 1 if the cond should pass and
|
|
||||||
* 0 otherwise
|
|
||||||
* <cond> may be NULL, in which case 1 is returned as the cond cannot fail
|
|
||||||
*/
|
|
||||||
static inline int acl_match_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt)
|
|
||||||
{
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
if (!cond)
|
|
||||||
return 1;
|
|
||||||
|
|
||||||
ret = acl_pass(acl_exec_cond(cond, px, sess, strm, opt));
|
|
||||||
if (cond->pol == ACL_COND_UNLESS)
|
|
||||||
ret = !ret;
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns a pointer to the first ACL conflicting with usage at place <where>
|
/* Returns a pointer to the first ACL conflicting with usage at place <where>
|
||||||
* which is one of the SMP_VAL_* bits indicating a check place, or NULL if
|
* which is one of the SMP_VAL_* bits indicating a check place, or NULL if
|
||||||
* no conflict is found. Only full conflicts are detected (ACL is not usable).
|
* no conflict is found. Only full conflicts are detected (ACL is not usable).
|
||||||
|
|||||||
@ -1,124 +0,0 @@
|
|||||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
|
||||||
#ifndef _ACME_T_H_
|
|
||||||
#define _ACME_T_H_
|
|
||||||
|
|
||||||
#include <haproxy/acme_resolvers-t.h>
|
|
||||||
#include <haproxy/istbuf.h>
|
|
||||||
#include <haproxy/openssl-compat.h>
|
|
||||||
|
|
||||||
#if defined(HAVE_ACME)
|
|
||||||
|
|
||||||
#define ACME_RETRY 5
|
|
||||||
|
|
||||||
/* Readiness requirements for challenge */
|
|
||||||
#define ACME_RDY_NONE 0x00
|
|
||||||
#define ACME_RDY_CLI 0x01
|
|
||||||
#define ACME_RDY_DNS 0x02
|
|
||||||
|
|
||||||
/* acme section configuration */
|
|
||||||
struct acme_cfg {
|
|
||||||
char *filename; /* config filename */
|
|
||||||
int linenum; /* config linenum */
|
|
||||||
char *name; /* section name */
|
|
||||||
int reuse_key; /* do we need to renew the private key */
|
|
||||||
int cond_ready; /* ready condition */
|
|
||||||
unsigned int dns_delay; /* delay in seconds before re-triggering DNS resolution (default: 300) */
|
|
||||||
unsigned int dns_timeout; /* time after which the DNS check shouldn't be retried (default: 600) */
|
|
||||||
char *directory; /* directory URL */
|
|
||||||
char *map; /* storage for tokens + thumbprint */
|
|
||||||
struct {
|
|
||||||
char *contact; /* email associated to account */
|
|
||||||
char *file; /* account key filename */
|
|
||||||
EVP_PKEY *pkey; /* account PKEY */
|
|
||||||
char *thumbprint; /* account PKEY JWS thumbprint */
|
|
||||||
} account;
|
|
||||||
|
|
||||||
struct {
|
|
||||||
int type; /* EVP_PKEY_EC or EVP_PKEY_RSA */
|
|
||||||
int bits; /* bits for RSA */
|
|
||||||
int curves; /* NID of curves */
|
|
||||||
} key;
|
|
||||||
char *challenge; /* HTTP-01, DNS-01, etc */
|
|
||||||
char *vars; /* variables put in the dpapi sink */
|
|
||||||
char *provider; /* DNS provider put in the dpapi sink */
|
|
||||||
struct acme_cfg *next;
|
|
||||||
};
|
|
||||||
|
|
||||||
enum acme_st {
|
|
||||||
ACME_RESOURCES = 0,
|
|
||||||
ACME_NEWNONCE,
|
|
||||||
ACME_CHKACCOUNT,
|
|
||||||
ACME_NEWACCOUNT,
|
|
||||||
ACME_NEWORDER,
|
|
||||||
ACME_AUTH,
|
|
||||||
ACME_RSLV_WAIT,
|
|
||||||
ACME_RSLV_TRIGGER,
|
|
||||||
ACME_RSLV_READY,
|
|
||||||
ACME_CHALLENGE,
|
|
||||||
ACME_CHKCHALLENGE,
|
|
||||||
ACME_FINALIZE,
|
|
||||||
ACME_CHKORDER,
|
|
||||||
ACME_CERTIFICATE,
|
|
||||||
ACME_END
|
|
||||||
};
|
|
||||||
|
|
||||||
enum http_st {
|
|
||||||
ACME_HTTP_REQ,
|
|
||||||
ACME_HTTP_RES,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct acme_auth {
|
|
||||||
struct ist dns; /* dns entry */
|
|
||||||
struct ist auth; /* auth URI */
|
|
||||||
struct ist chall; /* challenge URI */
|
|
||||||
struct ist token; /* token */
|
|
||||||
int validated; /* already validated */
|
|
||||||
struct acme_rslv *rslv; /* acme dns-01 resolver */
|
|
||||||
int ready; /* is the challenge ready ? */
|
|
||||||
void *next;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* acme task context */
|
|
||||||
struct acme_ctx {
|
|
||||||
enum acme_st state;
|
|
||||||
enum http_st http_state;
|
|
||||||
int retries;
|
|
||||||
int retryafter;
|
|
||||||
struct httpclient *hc;
|
|
||||||
struct acme_cfg *cfg;
|
|
||||||
struct ckch_store *store;
|
|
||||||
struct {
|
|
||||||
struct ist newNonce;
|
|
||||||
struct ist newAccount;
|
|
||||||
struct ist newOrder;
|
|
||||||
} resources;
|
|
||||||
struct ist nonce;
|
|
||||||
struct ist kid;
|
|
||||||
struct ist order;
|
|
||||||
struct acme_auth *auths;
|
|
||||||
struct acme_auth *next_auth;
|
|
||||||
X509_REQ *req;
|
|
||||||
struct ist finalize;
|
|
||||||
struct ist certificate;
|
|
||||||
unsigned int dnstasks; /* number of DNS tasks running for this ctx */
|
|
||||||
unsigned int dnsstarttime; /* time at which we started the DNS checks */
|
|
||||||
struct task *task;
|
|
||||||
struct ebmb_node node;
|
|
||||||
char name[VAR_ARRAY];
|
|
||||||
};
|
|
||||||
|
|
||||||
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
|
|
||||||
#define ACME_EV_NEW (1ULL << 1) /* new task */
|
|
||||||
#define ACME_EV_TASK (1ULL << 2) /* Task handler */
|
|
||||||
#define ACME_EV_REQ (1ULL << 3) /* HTTP Request */
|
|
||||||
#define ACME_EV_RES (1ULL << 4) /* HTTP Response */
|
|
||||||
|
|
||||||
#define ACME_VERB_CLEAN 1
|
|
||||||
#define ACME_VERB_MINIMAL 2
|
|
||||||
#define ACME_VERB_SIMPLE 3
|
|
||||||
#define ACME_VERB_ADVANCED 4
|
|
||||||
#define ACME_VERB_COMPLETE 5
|
|
||||||
|
|
||||||
#endif /* ! HAVE_ACME */
|
|
||||||
|
|
||||||
#endif
|
|
||||||
@ -1,12 +0,0 @@
|
|||||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
|
||||||
#ifndef _ACME_H_
|
|
||||||
#define _ACME_H_
|
|
||||||
|
|
||||||
#include <haproxy/ssl_ckch-t.h>
|
|
||||||
|
|
||||||
int ckch_conf_acme_init(void *value, char *buf, struct ckch_store *s, int cli, const char *filename, int linenum, char **err);
|
|
||||||
EVP_PKEY *acme_gen_tmp_pkey();
|
|
||||||
X509 *acme_gen_tmp_x509();
|
|
||||||
|
|
||||||
|
|
||||||
#endif
|
|
||||||
@ -1,27 +0,0 @@
|
|||||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
|
||||||
#ifndef _HAPROXY_ACME_RESOLVERS_T_H
|
|
||||||
#define _HAPROXY_ACME_RESOLVERS_T_H
|
|
||||||
|
|
||||||
#include <haproxy/obj_type-t.h>
|
|
||||||
#include <haproxy/resolvers-t.h>
|
|
||||||
|
|
||||||
struct dns_counters;
|
|
||||||
|
|
||||||
/* TXT records for dns-01 */
|
|
||||||
|
|
||||||
struct acme_rslv {
|
|
||||||
enum obj_type obj_type; /* OBJ_TYPE_ACME_RSLV */
|
|
||||||
unsigned int *dnstasks; /* number of running DNS resolution for the same acme_task */
|
|
||||||
char *hostname_dn;
|
|
||||||
int hostname_dn_len;
|
|
||||||
struct resolvers *resolvers;
|
|
||||||
struct resolv_requester *requester;
|
|
||||||
int result; /* RSLV_STATUS_* — NONE until done */
|
|
||||||
int error_code; /* RSLV_RESP_* from the error callback */
|
|
||||||
struct task *acme_task; /* ACME task to wake on completion, or NULL */
|
|
||||||
struct ist txt; /* first TXT record found */
|
|
||||||
int (*success_cb)(struct resolv_requester *, struct dns_counters *);
|
|
||||||
int (*error_cb)(struct resolv_requester *, int);
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_ACME_RESOLVERS_T_H */
|
|
||||||
@ -1,18 +0,0 @@
|
|||||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
|
||||||
#ifndef _HAPROXY_ACME_RESOLVERS_H
|
|
||||||
#define _HAPROXY_ACME_RESOLVERS_H
|
|
||||||
|
|
||||||
#include <haproxy/openssl-compat.h>
|
|
||||||
|
|
||||||
#if defined(HAVE_ACME)
|
|
||||||
|
|
||||||
#include <haproxy/acme_resolvers-t.h>
|
|
||||||
#include <haproxy/acme-t.h>
|
|
||||||
#include <haproxy/resolvers-t.h>
|
|
||||||
|
|
||||||
struct acme_rslv *acme_rslv_start(struct acme_auth *auth, unsigned int *dnstasks, char **errmsg);
|
|
||||||
void acme_rslv_free(struct acme_rslv *rslv);
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_ACME_RESOLVERS_H */
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user