Compare commits
No commits in common. "master" and "v2.9.0" have entirely different histories.

.cirrus.yml
@@ -1,15 +1,15 @@
 FreeBSD_task:
   freebsd_instance:
     matrix:
-      image_family: freebsd-14-3
+      image_family: freebsd-13-2
   only_if: $CIRRUS_BRANCH =~ 'master|next'
   install_script:
-    - pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
+    - pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua53 socat pcre
   script:
     - sudo sysctl kern.corefile=/tmp/%N.%P.core
     - sudo sysctl kern.sugid_coredump=1
    - scripts/build-vtest.sh
-    - gmake CC=clang V=1 ERR=1 TARGET=freebsd USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_OPENSSL=1 USE_LUA=1 LUA_INC=/usr/local/include/lua54 LUA_LIB=/usr/local/lib LUA_LIB_NAME=lua-5.4
+    - gmake CC=clang V=1 ERR=1 TARGET=freebsd USE_ZLIB=1 USE_PCRE=1 USE_OPENSSL=1 USE_LUA=1 LUA_INC=/usr/local/include/lua53 LUA_LIB=/usr/local/lib LUA_LIB_NAME=lua-5.3
   test_script:

.github/actions/setup-vtest/action.yml (vendored, 34 lines changed)
@@ -1,34 +0,0 @@
-name: 'setup VTest'
-description: 'ssss'
-
-runs:
-  using: "composite"
-  steps:
-
-    - name: Setup coredumps
-      if: ${{ startsWith(matrix.os, 'ubuntu-') }}
-      shell: bash
-      run: |
-        sudo sysctl -w fs.suid_dumpable=1
-        sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
-
-    - name: Setup ulimit for core dumps
-      shell: bash
-      run: |
-        # This is required for macOS which does not actually allow to increase
-        # the '-n' soft limit to the hard limit, thus failing to run.
-        ulimit -n 65536
-        ulimit -c unlimited
-
-    - name: Install VTest
-      shell: bash
-      run: |
-        scripts/build-vtest.sh
-
-    - name: Install problem matcher for VTest
-      shell: bash
-      # This allows one to more easily see which tests fail.
-      run: echo "::add-matcher::.github/vtest.json"
-
-
-

.github/h2spec.config (vendored, 6 lines changed)
@@ -19,9 +19,9 @@ defaults

 frontend h2
     mode http
-    bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
-    default_backend h2b
+    bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
+    default_backend h2

-backend h2b
+backend h2
     errorfile 200 .github/errorfile
     http-request deny deny_status 200

.github/matrix.py (vendored, 141 lines changed)
@@ -14,7 +14,6 @@ import re
 import sys
 import urllib.request
 from os import environ
-from packaging import version

 #
 # this CI is used for both development and stable branches of HAProxy
@@ -48,7 +47,7 @@ def determine_latest_openssl(ssl):
     latest_tag = ""
     for tag in tags:
         if "openssl-" in tag:
-            if (not latest_tag) or (version.parse(tag[8:]) > version.parse(latest_tag[8:])):
+            if tag > latest_tag:
                 latest_tag = tag
     return "OPENSSL_VERSION={}".format(latest_tag[8:])
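
Note: the master side of this hunk compares tags with packaging.version because
lexicographic order breaks once a version component reaches two digits. A
minimal sketch of the difference (the tag values here are illustrative, not
taken from the diff):

    from packaging import version

    tags = ["openssl-3.2.0", "openssl-3.10.0", "openssl-3.9.0"]
    print(max(tags))                                      # openssl-3.9.0 (lexicographic, wrong)
    print(max(tags, key=lambda t: version.parse(t[8:])))  # openssl-3.10.0 (numeric, right)
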
@@ -67,37 +66,6 @@ def determine_latest_aws_lc(ssl):
     latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
     return "AWS_LC_VERSION={}".format(latest_tag[1:])

-
-def aws_lc_fips_version_string_to_num(version_string):
-    return tuple(map(int, version_string[12:].split('.')))
-
-def aws_lc_fips_version_valid(version_string):
-    return re.match('^AWS-LC-FIPS-[0-9]+(\.[0-9]+)*$', version_string)
-
-@functools.lru_cache(5)
-def determine_latest_aws_lc_fips(ssl):
-    # the AWS-LC-FIPS tags are at the end of the list, so let's get a lot
-    tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags?per_page=200")
-    if not tags:
-        return "AWS_LC_FIPS_VERSION=failed_to_detect"
-    valid_tags = list(filter(aws_lc_fips_version_valid, tags))
-    latest_tag = max(valid_tags, key=aws_lc_fips_version_string_to_num)
-    return "AWS_LC_FIPS_VERSION={}".format(latest_tag[12:])
-
-def wolfssl_version_string_to_num(version_string):
-    return tuple(map(int, version_string[1:].removesuffix('-stable').split('.')))
-
-def wolfssl_version_valid(version_string):
-    return re.match('^v[0-9]+(\.[0-9]+)*-stable$', version_string)
-
-@functools.lru_cache(5)
-def determine_latest_wolfssl(ssl):
-    tags = get_all_github_tags("https://api.github.com/repos/wolfssl/wolfssl/tags")
-    if not tags:
-        return "WOLFSSL_VERSION=failed_to_detect"
-    valid_tags = list(filter(wolfssl_version_valid, tags))
-    latest_tag = max(valid_tags, key=wolfssl_version_string_to_num)
-    return "WOLFSSL_VERSION={}".format(latest_tag[1:].removesuffix('-stable'))
-
 @functools.lru_cache(5)
 def determine_latest_libressl(ssl):
     try:
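
Note: the helpers removed above turn a release tag into an integer tuple so
that max() picks the numerically latest release. A quick check of the slicing
they rely on (hypothetical tag values, shown only to illustrate):

    assert tuple(map(int, "AWS-LC-FIPS-2.0.17"[12:].split('.'))) == (2, 0, 17)
    assert tuple(map(int, "v5.7.0-stable"[1:].removesuffix('-stable').split('.'))) == (5, 7, 0)
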
@@ -117,6 +85,14 @@ def clean_compression(compression):
     return compression.replace("USE_", "").lower()


+def get_asan_flags(cc):
+    return [
+        "USE_OBSOLETE_LINKER=1",
+        'DEBUG_CFLAGS="-g -fsanitize=address"',
+        'LDFLAGS="-fsanitize=address"',
+        'CPU_CFLAGS.generic="-O1"',
+    ]
+
 def main(ref_name):
     print("Generating matrix for branch '{}'.".format(ref_name))
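
Note: on the v2.9.0 side the ASAN matrix entries are built by concatenating
this list with the feature flags (see the ASAN hunk further down); a sketch of
the resulting value, abbreviated to two feature flags:

    flags = get_asan_flags("gcc") + ["USE_ZLIB=1", "USE_OT=1"]
    # ['USE_OBSOLETE_LINKER=1', 'DEBUG_CFLAGS="-g -fsanitize=address"',
    #  'LDFLAGS="-fsanitize=address"', 'CPU_CFLAGS.generic="-O1"',
    #  'USE_ZLIB=1', 'USE_OT=1']
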
@@ -125,11 +101,9 @@ def main(ref_name):
     # Ubuntu

     if "haproxy-" in ref_name:
-        os = "ubuntu-24.04"  # stable branch
-        os_arm = "ubuntu-24.04-arm"  # stable branch
+        os = "ubuntu-22.04"  # stable branch
     else:
-        os = "ubuntu-24.04"  # development branch
-        os_arm = "ubuntu-24.04-arm"  # development branch
+        os = "ubuntu-latest"  # development branch

     TARGET = "linux-glibc"
     for CC in ["gcc", "clang"]:
@@ -150,16 +124,16 @@ def main(ref_name):
                 "TARGET": TARGET,
                 "CC": CC,
                 "FLAGS": [
-                    'DEBUG="-DDEBUG_LIST"',
                     "USE_ZLIB=1",
                     "USE_OT=1",
                     "OT_INC=${HOME}/opt-ot/include",
                     "OT_LIB=${HOME}/opt-ot/lib",
                     "OT_RUNPATH=1",
-                    "USE_PCRE2=1",
-                    "USE_PCRE2_JIT=1",
+                    "USE_PCRE=1",
+                    "USE_PCRE_JIT=1",
                     "USE_LUA=1",
                     "USE_OPENSSL=1",
+                    "USE_SYSTEMD=1",
                     "USE_WURFL=1",
                     "WURFL_INC=addons/wurfl/dummy",
                     "WURFL_LIB=addons/wurfl/dummy",
@@ -174,37 +148,35 @@ def main(ref_name):

         # ASAN

-        for os_asan in [os, os_arm]:
-            matrix.append(
-                {
-                    "name": "{}, {}, ASAN, all features".format(os_asan, CC),
-                    "os": os_asan,
-                    "TARGET": TARGET,
-                    "CC": CC,
-                    "FLAGS": [
-                        "USE_OBSOLETE_LINKER=1",
-                        'ARCH_FLAGS="-g -fsanitize=address"',
-                        'OPT_CFLAGS="-O1"',
-                        "USE_ZLIB=1",
-                        "USE_OT=1",
-                        "OT_INC=${HOME}/opt-ot/include",
-                        "OT_LIB=${HOME}/opt-ot/lib",
-                        "OT_RUNPATH=1",
-                        "USE_PCRE2=1",
-                        "USE_PCRE2_JIT=1",
-                        "USE_LUA=1",
-                        "USE_OPENSSL=1",
-                        "USE_WURFL=1",
-                        "WURFL_INC=addons/wurfl/dummy",
-                        "WURFL_LIB=addons/wurfl/dummy",
-                        "USE_DEVICEATLAS=1",
-                        "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
-                        "USE_PROMEX=1",
-                        "USE_51DEGREES=1",
-                        "51DEGREES_SRC=addons/51degrees/dummy/pattern",
-                    ],
-                }
-            )
+        matrix.append(
+            {
+                "name": "{}, {}, ASAN, all features".format(os, CC),
+                "os": os,
+                "TARGET": TARGET,
+                "CC": CC,
+                "FLAGS": get_asan_flags(CC)
+                + [
+                    "USE_ZLIB=1",
+                    "USE_OT=1",
+                    "OT_INC=${HOME}/opt-ot/include",
+                    "OT_LIB=${HOME}/opt-ot/lib",
+                    "OT_RUNPATH=1",
+                    "USE_PCRE=1",
+                    "USE_PCRE_JIT=1",
+                    "USE_LUA=1",
+                    "USE_OPENSSL=1",
+                    "USE_SYSTEMD=1",
+                    "USE_WURFL=1",
+                    "WURFL_INC=addons/wurfl/dummy",
+                    "WURFL_LIB=addons/wurfl/dummy",
+                    "USE_DEVICEATLAS=1",
+                    "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
+                    "USE_PROMEX=1",
+                    "USE_51DEGREES=1",
+                    "51DEGREES_SRC=addons/51degrees/dummy/pattern",
+                ],
+            }
+        )

         for compression in ["USE_ZLIB=1"]:
             matrix.append(
@@ -221,10 +193,9 @@ def main(ref_name):
         "stock",
         "OPENSSL_VERSION=1.0.2u",
         "OPENSSL_VERSION=1.1.1s",
-        "OPENSSL_VERSION=3.5.1",
         "QUICTLS=yes",
-        "WOLFSSL_VERSION=5.7.0",
-        "AWS_LC_VERSION=1.39.0",
+        "WOLFSSL_VERSION=5.6.4",
+        "AWS_LC_VERSION=1.16.0",
         # "BORINGSSL=yes",
     ]
@@ -236,7 +207,8 @@ def main(ref_name):

     for ssl in ssl_versions:
         flags = ["USE_OPENSSL=1"]
-        skipdup=0
+        if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
+            flags.append("USE_QUIC=1")
         if "WOLFSSL" in ssl:
             flags.append("USE_OPENSSL_WOLFSSL=1")
         if "AWS_LC" in ssl:
@@ -246,23 +218,8 @@ def main(ref_name):
            flags.append("SSL_INC=${HOME}/opt/include")
         if "LIBRESSL" in ssl and "latest" in ssl:
             ssl = determine_latest_libressl(ssl)
-            skipdup=1
         if "OPENSSL" in ssl and "latest" in ssl:
             ssl = determine_latest_openssl(ssl)
-            skipdup=1
-
-        # if "latest" equals a version already in the list
-        if ssl in ssl_versions and skipdup == 1:
-            continue
-
-        openssl_supports_quic = False
-        try:
-            openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
-        except:
-            pass
-
-        if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
-            flags.append("USE_QUIC=1")

         matrix.append(
             {
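
Note: with the block removed above, the master side also turns on QUIC for
plain OpenSSL releases from 3.5.0 onward; the version test it performs reduces
to this (sketch):

    from packaging import version

    ssl = "OPENSSL_VERSION=3.5.1"
    print(version.Version(ssl.split("OPENSSL_VERSION=", 1)[1]) >= version.Version("3.5.0"))  # True
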
@@ -278,9 +235,9 @@ def main(ref_name):
     # macOS

     if "haproxy-" in ref_name:
-        os = "macos-13"  # stable branch
+        os = "macos-12"  # stable branch
     else:
-        os = "macos-26"  # development branch
+        os = "macos-latest"  # development branch

     TARGET = "osx"
     for CC in ["clang"]:

.github/workflows/aws-lc-fips.yml (vendored, 12 lines changed)
@@ -1,12 +0,0 @@
-name: AWS-LC-FIPS
-
-on:
-  schedule:
-    - cron: "0 0 * * 4"
-  workflow_dispatch:
-
-jobs:
-  test:
-    uses: ./.github/workflows/aws-lc-template.yml
-    with:
-      command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"

.github/workflows/aws-lc-template.yml (vendored, 94 lines changed)
@@ -1,94 +0,0 @@
-name: AWS-LC template
-
-on:
-  workflow_call:
-    inputs:
-      command:
-        required: true
-        type: string
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Determine latest AWS-LC release
-        id: get_aws_lc_release
-        run: |
-          result=$(cd .github && python3 -c "${{ inputs.command }}")
-          echo $result
-          echo "result=$result" >> $GITHUB_OUTPUT
-      - name: Cache AWS-LC
-        id: cache_aws_lc
-        uses: actions/cache@v4
-        with:
-          path: '~/opt/'
-          key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb jose
-      - name: Install AWS-LC
-        if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
-        run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1

.github/workflows/aws-lc.yml (vendored, 60 lines changed)
@@ -5,8 +5,62 @@ on:
     - cron: "0 0 * * 4"
   workflow_dispatch:

+permissions:
+  contents: read
+
 jobs:
   test:
-    uses: ./.github/workflows/aws-lc-template.yml
-    with:
-      command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
+      - name: Determine latest AWS-LC release
+        id: get_aws_lc_release
+        run: |
+          result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
+          echo $result
+          echo "result=$result" >> $GITHUB_OUTPUT
+      - name: Cache AWS-LC
+        id: cache_aws_lc
+        uses: actions/cache@v3
+        with:
+          path: '~/opt/'
+          key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
+      - name: Install AWS-LC
+        if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
+        run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
+      - name: Compile HAProxy
+        run: |
+          make -j$(nproc) CC=gcc TARGET=linux-glibc \
+            USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
+            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
+            DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
+            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+          sudo make install
+      - name: Show HAProxy version
+        id: show-version
+        run: |
+          ldd $(which haproxy)
+          haproxy -vv
+          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+      - name: Install problem matcher for VTest
+        run: echo "::add-matcher::.github/vtest.json"
+      - name: Run VTest for HAProxy
+        id: vtest
+        run: |
+          # This is required for macOS which does not actually allow to increase
+          # the '-n' soft limit to the hard limit, thus failing to run.
+          ulimit -n 65536
+          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+      - name: Show VTest results
+        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+        run: |
+          for folder in ${TMPDIR}/haregtests-*/vtc.*; do
+            printf "::group::"
+            cat $folder/INFO
+            cat $folder/LOG
+            echo "::endgroup::"
+          done
+          exit 1

.github/workflows/codespell.yml (vendored, 10 lines changed)
@@ -3,7 +3,6 @@ name: Spelling Check
 on:
   schedule:
     - cron: "0 0 * * 2"
-  workflow_dispatch:

 permissions:
   contents: read
@@ -11,12 +10,11 @@ permissions:
 jobs:
   codespell:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
-      - uses: codespell-project/codespell-problem-matcher@v1.2.0
+      - uses: actions/checkout@v4
+      - uses: codespell-project/codespell-problem-matcher@v1
       - uses: codespell-project/actions-codespell@master
         with:
           skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
-          ignore_words_list: pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
-          uri_ignore_words_list: trafic,ressources
+          ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen

.github/workflows/compliance.yml (vendored, 19 lines changed)
@@ -11,10 +11,15 @@ permissions:
 jobs:
   h2spec:
     name: h2spec
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        include:
+          - TARGET: linux-glibc
+            CC: gcc
+            os: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install h2spec
         id: install-h2spec
         run: |
@@ -23,13 +28,13 @@ jobs:
           tar xvf h2spec.tar.gz
           sudo install -m755 h2spec /usr/local/bin/h2spec
           echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
-      - name: Compile HAProxy with gcc
+      - name: Compile HAProxy with ${{ matrix.CC }}
         run: |
           make -j$(nproc) all \
             ERR=1 \
-            TARGET=linux-glibc \
-            CC=gcc \
-            DEBUG="-DDEBUG_POOL_INTEGRITY" \
+            TARGET=${{ matrix.TARGET }} \
+            CC=${{ matrix.CC }} \
+            DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
             USE_OPENSSL=1
           sudo make install
       - name: Show HAProxy version

.github/workflows/contrib.yml (vendored, 2 lines changed)
@@ -10,7 +10,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Compile admin/halog/halog
         run: |
           make admin/halog/halog

.github/workflows/coverity.yml (vendored, 13 lines changed)
@@ -15,15 +15,14 @@ permissions:
 jobs:
   scan:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install apt dependencies
         run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install \
-            liblua5.4-dev \
-            libpcre2-dev \
+          sudo apt-get update
+          sudo apt-get install -y \
+            liblua5.3-dev \
             libsystemd-dev
       - name: Install QUICTLS
         run: |
@@ -38,7 +37,7 @@ jobs:
       - name: Build with Coverity build tool
         run: |
           export PATH=`pwd`/coverity_tool/bin:$PATH
-          cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1
+          cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_SYSTEMD=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
       - name: Submit build result to Coverity Scan
         run: |
           tar czvf cov.tar.gz cov-int

.github/workflows/cross-zoo.yml (vendored, 6 lines changed)
@@ -6,7 +6,6 @@ name: Cross Compile
 on:
   schedule:
     - cron: "0 0 21 * *"
-  workflow_dispatch:

 permissions:
   contents: read
@@ -91,15 +90,14 @@ jobs:
          }
        ]
    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
    steps:
    - name: install packages
      run: |
-        sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
+        sudo apt-get update
        sudo apt-get -yq --force-yes install \
          gcc-${{ matrix.platform.arch }} \
          ${{ matrix.platform.libs }}
-    - uses: actions/checkout@v5
+    - uses: actions/checkout@v4


    - name: install quictls

.github/workflows/fedora-rawhide.yml (vendored, 32 lines changed)
@@ -3,7 +3,6 @@ name: Fedora/Rawhide/QuicTLS
 on:
   schedule:
     - cron: "0 0 25 * *"
-  workflow_dispatch:

 permissions:
   contents: read
@@ -12,36 +11,29 @@ jobs:
   build_and_test:
     strategy:
       matrix:
-        platform: [
-          { name: x64, cc: gcc, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
-          { name: x64, cc: clang, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
-          { name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
-          { name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
-        ]
-      fail-fast: false
-    name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
+        cc: [ gcc, clang ]
+    name: ${{ matrix.cc }}
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
     container:
       image: fedora:rawhide
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install dependencies
         run: |
-          dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
-          dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
-      - uses: ./.github/actions/setup-vtest
+          dnf -y install git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
+      - name: Install VTest
+        run: scripts/build-vtest.sh
       - name: Install QuicTLS
-        run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
+        run: QUICTLS=yes scripts/build-ssl.sh
       - name: Build contrib tools
         run: |
           make admin/halog/halog
           make dev/flags/flags
           make dev/poll/poll
           make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
-      - name: Compile HAProxy with ${{ matrix.platform.cc }}
+      - name: Compile HAProxy with ${{ matrix.cc }}
         run: |
-          make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
+          make -j3 CC=${{ matrix.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_SYSTEMD=1 ADDLIB="-Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include
           make install
       - name: Show HAProxy version
         id: show-version
@@ -58,13 +50,9 @@ jobs:
       - name: Show VTest results
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
+          for folder in ${TMPDIR}/haregtests-*/vtc.*; do
             printf "::group::"
             cat $folder/INFO
             cat $folder/LOG
             echo "::endgroup::"
           done
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests

.github/workflows/illumos.yml (vendored, 24 lines changed)
@@ -1,24 +0,0 @@
-name: Illumos
-
-on:
-  schedule:
-    - cron: "0 0 25 * *"
-  workflow_dispatch:
-
-jobs:
-  gcc:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    permissions:
-      contents: read
-    steps:
-    - name: "Checkout repository"
-      uses: actions/checkout@v5
-
-    - name: "Build on VM"
-      uses: vmactions/solaris-vm@v1
-      with:
-        prepare: |
-          pkg install gcc make
-        run: |
-          gmake CC=gcc TARGET=solaris USE_OPENSSL=1 USE_PROMEX=1

.github/workflows/musl.yml (vendored, 20 lines changed)
@@ -20,13 +20,13 @@ jobs:
         run: |
           ulimit -c unlimited
           echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install dependencies
-        run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
+        run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
       - name: Install VTest
         run: scripts/build-vtest.sh
       - name: Build
-        run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
+        run: make -j$(nproc) TARGET=linux-musl DEBUG_CFLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
       - name: Show version
         run: ./haproxy -vv
       - name: Show linked libraries
@@ -37,10 +37,6 @@ jobs:
       - name: Run VTest
         id: vtest
         run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
       - name: Show coredumps
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |
@@ -64,13 +60,3 @@ jobs:
             cat $folder/LOG
             echo "::endgroup::"
           done
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1

.github/workflows/netbsd.yml (vendored, 24 lines changed)
@@ -1,24 +0,0 @@
-name: NetBSD
-
-on:
-  schedule:
-    - cron: "0 0 25 * *"
-  workflow_dispatch:
-
-jobs:
-  gcc:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    permissions:
-      contents: read
-    steps:
-    - name: "Checkout repository"
-      uses: actions/checkout@v5
-
-    - name: "Build on VM"
-      uses: vmactions/netbsd-vm@v1
-      with:
-        prepare: |
-          /usr/sbin/pkg_add gmake curl
-        run: |
-          gmake CC=gcc TARGET=netbsd ERR=1 USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1 USE_ZLIB=1

.github/workflows/openssl-ech.yml (vendored, 82 lines changed)
@@ -1,82 +0,0 @@
-name: openssl ECH
-
-on:
-  schedule:
-    - cron: "0 3 * * *"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install VTest
-        run: |
-          scripts/build-vtest.sh
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-          sudo apt-get --no-install-recommends -y install libpsl-dev
-      - name: Install OpenSSL+ECH
-        run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
-      - name: Install curl+ECH
-        run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - name: Install problem matcher for VTest
-        run: echo "::add-matcher::.github/vtest.json"
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          # This is required for macOS which does not actually allow to increase
-          # the '-n' soft limit to the hard limit, thus failing to run.
-          ulimit -n 65536
-          # allow to catch coredumps
-          ulimit -c unlimited
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi

.github/workflows/openssl-master.yml (vendored, 77 lines changed)
@@ -1,77 +0,0 @@
-name: openssl master
-
-on:
-  schedule:
-    - cron: "0 3 * * *"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-          sudo apt-get --no-install-recommends -y install libpsl-dev
-      - uses: ./.github/actions/setup-vtest
-      - name: Install OpenSSL master
-        run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - name: Install problem matcher for VTest
-        run: echo "::add-matcher::.github/vtest.json"
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          # This is required for macOS which does not actually allow to increase
-          # the '-n' soft limit to the hard limit, thus failing to run.
-          ulimit -n 65536
-          # allow to catch coredumps
-          ulimit -c unlimited
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi

.github/workflows/openssl-nodeprecated.yml (vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
+#
+# special purpose CI: test against OpenSSL built in "no-deprecated" mode
+# let us run those builds weekly
+#
+# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
+#
+#
+# some details might be found at ML: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
+# GH: https://github.com/haproxy/haproxy/issues/367
+
+name: openssl no-deprecated
+
+on:
+  schedule:
+    - cron: "0 0 * * 4"
+
+permissions:
+  contents: read
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
+      - name: Compile HAProxy
+        run: |
+          make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
+      - name: Run VTest
+        run: |
+          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel

.github/workflows/quic-interop-aws-lc.yml (vendored, 104 lines changed)
@@ -1,104 +0,0 @@
-#
-# goodput,crosstraffic are not run on purpose, those tests are intended to bandwidth measurement, we currently do not want to use GitHub runners for that
-#
-
-name: QUIC Interop AWS-LC
-
-on:
-  workflow_dispatch:
-  schedule:
-    - cron: "0 0 * * 2"
-
-jobs:
-  build:
-    runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    permissions:
-      contents: read
-      packages: write
-
-    steps:
-      - uses: actions/checkout@v5
-
-      - name: Log in to the Container registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Build and push Docker image
-        id: push
-        uses: docker/build-push-action@v5
-        with:
-          context: https://github.com/haproxytech/haproxy-qns.git
-          push: true
-          build-args: |
-            SSLLIB=AWS-LC
-          tags: ghcr.io/${{ github.repository }}:aws-lc
-
-      - name: Cleanup registry
-        uses: actions/delete-package-versions@v5
-        with:
-          owner: ${{ github.repository_owner }}
-          package-name: 'haproxy'
-          package-type: container
-          min-versions-to-keep: 1
-          delete-only-untagged-versions: 'true'
-
-  run:
-    needs: build
-    strategy:
-      matrix:
-        suite: [
-          { client: chrome, tests: "http3" },
-          { client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
-          { client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
-          { client: ngtcp2, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" }
-        ]
-      fail-fast: false
-
-    name: ${{ matrix.suite.client }}
-    runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-
-    steps:
-      - uses: actions/checkout@v5
-
-      - name: Log in to the Container registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Install tshark
-        run: |
-          sudo apt-get update
-          sudo apt-get -y install tshark
-
-      - name: Pull image
-        run: |
-          docker pull ghcr.io/${{ github.repository }}:aws-lc
-
-      - name: Run
-        run: |
-          git clone https://github.com/quic-interop/quic-interop-runner
-          cd quic-interop-runner
-          pip install -r requirements.txt --break-system-packages
-          python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:aws-lc -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
-
-      - name: Delete succeeded logs
-        if: failure()
-        run: |
-          cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
-          cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
-
-      - name: Logs upload
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: logs-${{ matrix.suite.client }}
-          path: quic-interop-runner/logs/
-          retention-days: 6

.github/workflows/quic-interop-libressl.yml (vendored, 102 lines changed)
@@ -1,102 +0,0 @@
-#
-# goodput,crosstraffic are not run on purpose, those tests are intended to bandwidth measurement, we currently do not want to use GitHub runners for that
-#
-
-name: QUIC Interop LibreSSL
-
-on:
-  workflow_dispatch:
-  schedule:
-    - cron: "0 0 * * 2"
-
-jobs:
-  build:
-    runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    permissions:
-      contents: read
-      packages: write
-
-    steps:
-      - uses: actions/checkout@v5
-
-      - name: Log in to the Container registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Build and push Docker image
-        id: push
-        uses: docker/build-push-action@v5
-        with:
-          context: https://github.com/haproxytech/haproxy-qns.git
-          push: true
-          build-args: |
-            SSLLIB=LibreSSL
-          tags: ghcr.io/${{ github.repository }}:libressl
-
-      - name: Cleanup registry
-        uses: actions/delete-package-versions@v5
-        with:
-          owner: ${{ github.repository_owner }}
-          package-name: 'haproxy'
-          package-type: container
-          min-versions-to-keep: 1
-          delete-only-untagged-versions: 'true'
-
-  run:
-    needs: build
-    strategy:
-      matrix:
-        suite: [
-          { client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
-          { client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
-        ]
-      fail-fast: false
-
-    name: ${{ matrix.suite.client }}
-    runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-
-    steps:
-      - uses: actions/checkout@v5
-
-      - name: Log in to the Container registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Install tshark
-        run: |
-          sudo apt-get update
-          sudo apt-get -y install tshark
-
-      - name: Pull image
-        run: |
-          docker pull ghcr.io/${{ github.repository }}:libressl
-
-      - name: Run
-        run: |
-          git clone https://github.com/quic-interop/quic-interop-runner
-          cd quic-interop-runner
-          pip install -r requirements.txt --break-system-packages
-          python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:libressl -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
-
-      - name: Delete succeeded logs
-        if: failure()
-        run: |
-          cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
-          cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
-
-      - name: Logs upload
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: logs-${{ matrix.suite.client }}
-          path: quic-interop-runner/logs/
-          retention-days: 6

.github/workflows/quictls.yml (vendored, 74 lines changed)
@@ -1,74 +0,0 @@
-#
-# weekly run against modern QuicTLS branch, i.e. https://github.com/quictls/quictls
-#
-
-name: QuicTLS
-
-on:
-  schedule:
-    - cron: "0 0 * * 4"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-      - name: Install QuicTLS
-        run: env QUICTLS=yes QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi

.github/workflows/vtest.yml (vendored, 86 lines changed)
@@ -23,7 +23,7 @@ jobs:
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Generate Build Matrix
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -42,12 +42,13 @@ jobs:
       # Configure a short TMPDIR to prevent failures due to long unix socket
       # paths.
       TMPDIR: /tmp
+      # Force ASAN output into asan.log to make the output more readable.
+      ASAN_OPTIONS: log_path=asan.log
       OT_CPP_VERSION: 1.6.0
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 100

       #
       # Github Action cache key cannot contain comma, so we calculate it based on job name
       #
@@ -59,7 +60,7 @@ jobs:
       - name: Cache SSL libs
         if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
         id: cache_ssl
-        uses: actions/cache@v4
+        uses: actions/cache@v3
         with:
           path: '~/opt/'
           key: ssl-${{ steps.generate-cache-key.outputs.key }}
@@ -67,27 +68,28 @@ jobs:
       - name: Cache OpenTracing
         if: ${{ contains(matrix.FLAGS, 'USE_OT=1') }}
         id: cache_ot
-        uses: actions/cache@v4
+        uses: actions/cache@v3
         with:
           path: '~/opt-ot/'
-          key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
+          key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
       - name: Install apt dependencies
         if: ${{ startsWith(matrix.os, 'ubuntu-') }}
         run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install \
-            ${{ contains(matrix.FLAGS, 'USE_LUA=1') && 'liblua5.4-dev' || '' }} \
-            ${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
-            ${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
-            socat \
-            gdb \
-            jose
+          sudo apt-get update
+          sudo apt-get install -y \
+            liblua5.3-dev \
+            libpcre2-dev \
+            libsystemd-dev \
+            ninja-build \
+            socat
       - name: Install brew dependencies
         if: ${{ startsWith(matrix.os, 'macos-') }}
         run: |
           brew install socat
           brew install lua
-      - uses: ./.github/actions/setup-vtest
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
       - name: Install SSL ${{ matrix.ssl }}
         if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
         run: env ${{ matrix.ssl }} scripts/build-ssl.sh
@@ -110,19 +112,10 @@ jobs:
             ERR=1 \
             TARGET=${{ matrix.TARGET }} \
             CC=${{ matrix.CC }} \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ${{ join(matrix.FLAGS, ' ') }} \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install-bin
-      - name: Compile admin/halog/halog
-        run: |
-          make -j$(nproc) admin/halog/halog \
-            ERR=1 \
-            TARGET=${{ matrix.TARGET }} \
-            CC=${{ matrix.CC }} \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
+            DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
             ${{ join(matrix.FLAGS, ' ') }} \
             ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+          sudo make install
       - name: Show HAProxy version
         id: show-version
         run: |
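Outside the runner, this compile step can be reproduced with a sketch like the
following (TARGET, CC and the trailing USE_* flag stand in for the matrix
values and are not taken from this hunk):

    $ make -j $(nproc) ERR=1 TARGET=linux-glibc CC=gcc \
           DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
           USE_OPENSSL=1
    $ sudo make install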
@@ -137,44 +130,33 @@ jobs:
           echo "::endgroup::"
           haproxy -vv
           echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+      - name: Install problem matcher for VTest
+        # This allows one to more easily see which tests fail.
+        run: echo "::add-matcher::.github/vtest.json"
       - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
         id: vtest
         run: |
+          # This is required for macOS which does not actually allow to increase
+          # the '-n' soft limit to the hard limit, thus failing to run.
+          ulimit -n 65536
           make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+      - name: Config syntax check memleak smoke testing
+        if: ${{ contains(matrix.name, 'ASAN') }}
+        run: |
+          ./haproxy -f .github/h2spec.config -c
       - name: Show VTest results
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
+          for folder in ${TMPDIR}/haregtests-*/vtc.*; do
             printf "::group::"
             cat $folder/INFO
             cat $folder/LOG
             echo "::endgroup::"
           done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
           shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
+          for asan in asan.log*; do
+            echo "::group::$asan"
+            cat $asan
             echo "::endgroup::"
           done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
+          exit 1
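The ASAN smoke test added on the v2.9.0 side can be run standalone as a
sketch, assuming a binary built with -fsanitize=address (the ASAN_OPTIONS
value and the config path come from this same workflow; the asan.log.* suffix
is how ASAN names per-process logs):

    $ ASAN_OPTIONS=log_path=asan.log ./haproxy -f .github/h2spec.config -c
    $ cat asan.log.* 2>/dev/null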
.github/workflows/windows.yml: 4 changed lines (vendored)
@@ -35,7 +35,7 @@ jobs:
           - USE_THREAD=1
           - USE_ZLIB=1
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
      - uses: msys2/setup-msys2@v2
        with:
          install: >-
@@ -58,7 +58,7 @@ jobs:
            ERR=1 \
            TARGET=${{ matrix.TARGET }} \
            CC=${{ matrix.CC }} \
-            DEBUG="-DDEBUG_POOL_INTEGRITY" \
+            DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
            ${{ join(matrix.FLAGS, ' ') }}
      - name: Show HAProxy version
        id: show-version
.github/workflows/wolfssl.yml: 80 changed lines (vendored)
@@ -1,80 +0,0 @@
-name: WolfSSL
-
-on:
-  schedule:
-    - cron: "0 0 * * 4"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb jose
-      - name: Install WolfSSL
-        run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1
-
.gitignore: 1 changed line (vendored)
@@ -57,4 +57,3 @@ dev/udp/udp-perturb
 /src/dlmalloc.c
 /tests/test_hashes
 doc/lua-api/_build
-dev/term_events/term_events
@@ -8,7 +8,7 @@ branches:

 env:
   global:
-    - FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_ZLIB=1"
+    - FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_SYSTEMD=1 USE_ZLIB=1"
     - TMPDIR=/tmp

 addons:
BRANCHES: 12 changed lines
@@ -171,17 +171,7 @@ feedback for developers:
   as the previous releases that had 6 months to stabilize. In terms of
   stability it really means that the point zero version already accumulated
   6 months of fixes and that it is much safer to use even just after it is
-  released. There is one exception though, features marked as "experimental"
-  are not guaranteed to be maintained beyond the release of the next LTS
-  branch. The rationale here is that the experimental status is made to
-  expose an early preview of a feature, that is often incomplete, not always
-  in its definitive form regarding configuration, and for which developers
-  are seeking feedback from the users. It is even possible that changes will
-  be brought within the stable branch and it may happen that the feature
-  breaks. It is not imaginable to always be able to backport bug fixes too
-  far in this context since the code and configuration may change quite a
-  bit. Users who want to try experimental features are expected to upgrade
-  quickly to benefit from the improvements made to that feature.
+  released.

 - for developers, given that the odd versions are solely used by highly
   skilled users, it's easier to get advanced traces and captures, and there
@@ -1010,7 +1010,7 @@ you notice you're already practising some of them:
 - continue to send pull requests after having been explained why they are not
   welcome.

-- give wrong advice to people asking for help, or sending them patches to
+- give wrong advices to people asking for help, or sending them patches to
   try which make no sense, waste their time, and give them a bad impression
   of the people working on the project.

INSTALL: 425 changed lines
@@ -9,7 +9,7 @@ used to follow updates then it is recommended that instead you use the packages
 provided by your software vendor or Linux distribution. Most of them are taking
 this task seriously and are doing a good job at backporting important fixes.

-If for any reason you would prefer a different version than the one packaged
+If for any reason you'd prefer to use a different version than the one packaged
 for your system, you want to be certain to have all the fixes or to get some
 commercial support, other choices are available at http://www.haproxy.com/.

@@ -34,26 +34,18 @@ are a few build examples :
  - recent Linux system with all options, make and install :
     $ make clean
     $ make -j $(nproc) TARGET=linux-glibc \
-           USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1 \
-           USE_LUA=1 USE_PCRE2=1
+           USE_OPENSSL=1 USE_LUA=1 USE_PCRE=1 USE_SYSTEMD=1
     $ sudo make install

- - FreeBSD + OpenSSL, build with all options :
-    $ gmake -j $(sysctl -n hw.ncpu) TARGET=freebsd \
-           USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1 \
-           USE_LUA=1 USE_PCRE2=1
-
- - OpenBSD + LibreSSL, build with all options :
-    $ gmake -j $(sysctl -n hw.ncpu) TARGET=openbsd \
-           USE_OPENSSL=1 USE_QUIC=1 USE_LUA=1 USE_PCRE2=1
+ - FreeBSD and OpenBSD, build with all options :
+    $ gmake -j 4 TARGET=freebsd USE_OPENSSL=1 USE_LUA=1 USE_PCRE=1

  - embedded Linux, build using a cross-compiler :
-    $ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_PCRE2=1 \
-           CC=/opt/cross/gcc730-arm/bin/gcc CFLAGS="-mthumb" ADDLIB=-latomic
+    $ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_PCRE=1 \
+           CC=/opt/cross/gcc730-arm/bin/gcc ADDLIB=-latomic

  - Build with static PCRE on Solaris / UltraSPARC :
-    $ make -j $(/usr/sbin/psrinfo -p) TARGET=solaris \
-           CPU_CFLAGS="-mcpu=v9" USE_STATIC_PCRE2=1
+    $ make TARGET=solaris CPU=ultrasparc USE_STATIC_PCRE=1

 For more advanced build options or if a command above reports an error, please
 read the following sections.
@@ -81,10 +73,10 @@ can use a relatively similar one and adjust specific variables by hand.
 Most configuration variables are in fact booleans. Some options are detected and
 enabled by default if available on the target platform. This is the case for all
 those named "USE_<feature>". These booleans are enabled by "USE_<feature>=1"
-and are disabled by "USE_<feature>=" (with no value) or "USE_<feature>=0". An
-exhaustive list of the supported USE_* features is located at the top of the
-main Makefile. The last occurrence of such an option on the command line
-overrides any previous one. Example :
+and are disabled by "USE_<feature>=" (with no value). An exhaustive list of the
+supported USE_* features is located at the top of the main Makefile. The last
+occurrence of such an option on the command line overrides any previous one.
+Example :

   $ make TARGET=generic USE_THREAD=

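As an illustration of these semantics, a short sketch (the feature names are
arbitrary examples; the "=0" disabling form is described on the master side
only):

    $ make TARGET=linux-glibc USE_LUA=1 USE_THREAD=       # disable via empty value
    $ make TARGET=linux-glibc USE_OPENSSL= USE_OPENSSL=1  # last occurrence wins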
@@ -111,22 +103,20 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
   may want to retry with "gmake" which is the name commonly used for GNU make
   on BSD systems.

-  - GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
-    the latest mt_list update which only uses c11-like atomics. Newer versions
-    may sometimes break due to compiler regressions or behaviour changes. The
-    version shipped with your operating system is very likely to work with no
-    trouble. Clang >= 3.0 is also known to work as an alternative solution, and
-    versions up to 19 were successfully tested. Recent versions may emit a bit
-    more warnings that are worth reporting as they may reveal real bugs. TCC
-    (https://repo.or.cz/tinycc.git) is also usable for developers but will not
-    support threading and was found at least once to produce bad code in some
-    rare corner cases (since fixed). But it builds extremely quickly (typically
-    half a second for the whole project) and is very convenient to run quick
-    tests during API changes or code refactoring.
+  - GCC >= 4.2 (up to 13 tested). Older versions can be made to work with a
+    few minor adaptations if really needed. Newer versions may sometimes break
+    due to compiler regressions or behaviour changes. The version shipped with
+    your operating system is very likely to work with no trouble. Clang >= 3.0
+    is also known to work as an alternative solution. Recent versions may emit
+    a bit more warnings that are worth reporting as they may reveal real bugs.
+    TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
+    not support threading and was found at least once to produce bad code in
+    some rare corner cases (since fixed). But it builds extremely quickly
+    (typically half a second for the whole project) and is very convenient to
+    run quick tests during API changes or code refactoring.

   - GNU ld (binutils package), with no particular version. Other linkers might
-    work but were not tested. The default one from your operating system will
-    normally work.
+    work but were not tested.

 On debian or Ubuntu systems and their derivatives, you may get all these tools
 at once by issuing the two following commands :
@@ -191,9 +181,9 @@ regex engine which could be slow or even crash on certain patterns.

 If you plan on importing a particularly heavy configuration involving a lot of
 regex, you may benefit from using some alternative regex implementations such as
-PCRE. HAProxy natively supports PCRE and PCRE2 (recommended), both in standard
-and JIT flavors (Just In Time). The following options are available depending on
-the library version provided on your system :
+PCRE. HAProxy natively supports PCRE and PCRE2, both in standard and JIT
+flavors (Just In Time). The following options are available depending on the
+library version provided on your system :

   - "USE_PCRE=1"        : enable PCRE version 1, dynamic linking
   - "USE_STATIC_PCRE=1" : enable PCRE version 1, static linking
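For example, a sketch enabling the JIT-enabled PCRE2 variant (the same pair of
flags the FreeBSD CI job in this repository passes):

    $ make -j $(nproc) TARGET=linux-glibc USE_PCRE2=1 USE_PCRE2_JIT=1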
@@ -237,7 +227,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
 -----------------
 For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
 supports the OpenSSL library, and is known to build and work with branches
-1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
+1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, 3.0, 3.1 and 3.2. It is recommended to use
 at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
 in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
 and each of the branches above receives its own fixes, without forcing you to
@@ -254,20 +244,16 @@ https://github.com/openssl/openssl/issues/17627). If a migration to 3.x is
 mandated by support reasons, at least 3.1 recovers a small fraction of this
 important loss.

-Three OpenSSL derivatives called LibreSSL, QUICTLS, and AWS-LC are
+Four OpenSSL derivatives called LibreSSL, BoringSSL, QUICTLS, and AWS-LC are
 reported to work as well. While there are some efforts from the community to
 ensure they work well, OpenSSL remains the primary target and this means that
 in case of conflicting choices, OpenSSL support will be favored over other
 options. Note that QUIC is not fully supported when haproxy is built with
-OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
-alternatives. As of writing this, the QuicTLS project follows OpenSSL very
-closely and provides update simultaneously, but being a volunteer-driven
-project, its long-term future does not look certain enough to convince
-operating systems to package it, so it needs to be build locally. Recent
-versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
-generally more performant than other OpenSSL derivatives, but may behave
-slightly differently, particularly when dealing with outdated setups. See
-the section about QUIC in this document.
+OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
+this, the QuicTLS project follows OpenSSL very closely and provides update
+simultaneously, but being a volunteer-driven project, its long-term future does
+not look certain enough to convince operating systems to package it, so it
+needs to be build locally. See the section about QUIC in this document.

 A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
 supported alternative stack not based on OpenSSL, yet which implements almost
@@ -295,11 +281,11 @@ SSL library files using SSL_LIB. Example :
     USE_OPENSSL=1 SSL_INC=/opt/ssl-1.1.1/include SSL_LIB=/opt/ssl-1.1.1/lib

 To use HAProxy with WolfSSL, WolfSSL must be built with haproxy support, at
-least WolfSSL 5.6.6 is needed, but a development version might be needed for
+least WolfSSL 5.6.4 is needed, but a development version might be needed for
 some of the features:

   $ cd ~/build/wolfssl
-  $ ./configure --enable-haproxy --enable-quic --prefix=/opt/wolfssl-5.6.6/
+  $ ./configure --enable-haproxy --enable-quic --prefix=/opt/wolfssl-5.6.4/
   $ make -j $(nproc)
   $ make install

@@ -307,26 +293,14 @@ Please also note that wolfSSL supports many platform-specific features that may
 affect performance, and that for production uses it might be a good idea to
 check them using "./configure --help". Please refer to the lib's documentation.

-When running wolfSSL in chroot, either mount /dev/[u]random devices into the
-chroot:
-
-  $ mkdir -p /path/to/chrootdir/dev/
-  $ mknod -m 444 /path/to/chrootdir/dev/random c 1 8
-  $ mknod -m 444 /path/to/chrootdir/dev/urandom c 1 9
-
-Or, if your OS supports it, enable the getrandom() syscall by appending the
-following argument to the wolfSSL configure command:
-
-  EXTRA_CFLAGS=-DWOLFSSL_GETRANDOM=1
-
 Building HAProxy with wolfSSL requires to specify the API variant on the "make"
 command line, for example:

   $ cd ~/build/haproxy
   $ make -j $(nproc) TARGET=generic USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
-         SSL_INC=/opt/wolfssl-5.6.6/include SSL_LIB=/opt/wolfssl-5.6.6/lib
+         SSL_INC=/opt/wolfssl-5.6.4/include SSL_LIB=/opt/wolfssl-5.6.4/lib

-To use HAProxy with AWS-LC you must have version v1.22.0 or newer of AWS-LC
+To use HAProxy with AWS-LC you must have version v1.13.0 or newer of AWS-LC
 built and installed locally.
   $ cd ~/build/aws-lc
   $ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/aws-lc
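The AWS-LC recipe above stops at the cmake invocation; a plausible completion,
shown only as a sketch (the install step is an assumption, while the
USE_OPENSSL_AWSLC make flags are the ones this file uses later in its LDFLAGS
example):

    $ make install                 # assumption: installs into /opt/aws-lc
    $ cd ~/build/haproxy
    $ make -j $(nproc) TARGET=generic USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
           SSL_INC=/opt/aws-lc/include SSL_LIB=/opt/aws-lc/lib \
           LDFLAGS="-Wl,-rpath,/opt/aws-lc/lib"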
@@ -389,15 +363,10 @@ systems, by passing "USE_SLZ=" to the "make" command.

 Please note that SLZ will benefit from some CPU-specific instructions like the
 availability of the CRC32 extension on some ARM processors. Thus it can further
-improve its performance to build with:
-
-  - "CPU_CFLAGS=-march=native" on the target system or
-  - "CPU_CFLAGS=-march=armv81" on modern systems such as Graviton2 or A55/A75
-    and beyond)
-  - "CPU_CFLAGS=-march=a72" (e.g. for RPi4, or AWS Graviton)
-  - "CPU_CFLAGS=-march=a53" (e.g. for RPi3)
-  - "CPU_CFLAGS=-march=armv8-auto" automatic detection with minor runtime
-    penalty)
+improve its performance to build with "CPU=native" on the target system, or
+"CPU=armv81" (modern systems such as Graviton2 or A55/A75 and beyond),
+"CPU=a72" (e.g. for RPi4, or AWS Graviton), "CPU=a53" (e.g. for RPi3), or
+"CPU=armv8-auto" (automatic detection with minor runtime penalty).

 A second option involves the widely known zlib library, which is very likely
 installed on your system. In order to use zlib, simply pass "USE_ZLIB=1" to the
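As a sketch, here are the two spellings of the same tuning intent taken from
the two sides of this hunk (CPU_CFLAGS on the master side, the CPU variable on
the v2.9.0 side):

    $ make -j $(nproc) TARGET=linux-glibc CPU_CFLAGS="-march=native"
    $ make -j $(nproc) TARGET=linux-glibc CPU=native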
@@ -471,6 +440,12 @@ are the extra libraries that may be referenced at build time :
               on Linux. It is automatically detected and may be disabled
               using "USE_DL=", though it should never harm.

+  - USE_SYSTEMD=1       enables support for the sdnotify features of systemd,
+                        allowing better integration with systemd on Linux systems
+                        which come with it. It is never enabled by default so there
+                        is no need to disable it.
+

 4.10) Common errors
 -------------------
 Some build errors may happen depending on the options combinations or the
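A minimal sketch of a v2.9.0-side build enabling this option (the extra
USE_OPENSSL flag is just an example of combining features):

    $ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_SYSTEMD=1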
@@ -494,8 +469,8 @@ target. Common issues may include:
       other supported compatible library.

   - many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
-    => these warnings happen on old compilers (typically gcc before 7.x),
-       and may safely be ignored; newer ones are better on these.
+    => these warnings happen on old compilers (typically gcc-4.4), and may
+       safely be ignored; newer ones are better on these.


 4.11) QUIC
@@ -504,11 +479,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
 protocol stack is currently supported as an experimental feature in haproxy on
 the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".

-Note that QUIC is not always fully supported by the OpenSSL library depending on
-its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before
-3.5 contrary to others libraries with full QUIC support. The preferred option is
-to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its
-repository is available at this location:
+Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
+cannot be supported by OpenSSL contrary to others libraries with full QUIC
+support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
+a QUIC-compatible API. Its repository is available at this location:

 https://github.com/quictls/openssl

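A sketch of a QUICTLS-based build, mirroring the pattern the CI uses for
alternative SSL stacks (the QUICTLS=yes invocation of scripts/build-ssl.sh and
the $HOME/opt prefix are inferred from the vtest workflow above, not stated in
this hunk):

    $ env QUICTLS=yes scripts/build-ssl.sh
    $ make -j $(nproc) TARGET=linux-glibc USE_QUIC=1 USE_OPENSSL=1 \
           SSL_INC=$HOME/opt/include SSL_LIB=$HOME/opt/lib \
           ADDLIB="-Wl,-rpath,$HOME/opt/lib"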
@@ -536,18 +510,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
        SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
        LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"

-As last resort, haproxy may be compiled against OpenSSL as follows from 3.5
-version with 0-RTT support:
-
-  $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
-
-or as follows for all OpenSSL versions but without O-RTT support:
+As last resort, haproxy may be compiled against OpenSSL as follows:

   $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1

-In addition to this requirements, the QUIC listener bindings must be explicitly
-enabled with a specific QUIC tuning parameter. (see "limited-quic" global
-parameter of haproxy Configuration Manual).
+Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
+OpenSSL. In addition to this compilation requirements, the QUIC listener
+bindings must be explicitly enabled with a specific QUIC tuning parameter.
+(see "limited-quic" global parameter of haproxy Configuration Manual).


 5) How to build HAProxy
|
|||||||
|
|
||||||
This section assumes that you have already read section 2 (basic principles)
|
This section assumes that you have already read section 2 (basic principles)
|
||||||
and section 3 (build environment). It often refers to section 4 (dependencies).
|
and section 3 (build environment). It often refers to section 4 (dependencies).
|
||||||
It goes into more details with the main options.
|
|
||||||
|
|
||||||
|
|
||||||
5.1) Configuring the TARGET
|
|
||||||
---------------------------
|
|
||||||
To build haproxy, you have to choose your target OS amongst the following ones
|
To build haproxy, you have to choose your target OS amongst the following ones
|
||||||
and assign it to the TARGET variable :
|
and assign it to the TARGET variable :
|
||||||
|
|
||||||
- linux-glibc for Linux kernel 4.17 and above
|
- linux-glibc for Linux kernel 2.6.28 and above
|
||||||
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
||||||
- linux-musl for Linux kernel 4.17 and above with musl libc
|
- linux-musl for Linux kernel 2.6.28 and above with musl libc
|
||||||
- solaris for Solaris 10 and above
|
- solaris for Solaris 10 and above
|
||||||
- freebsd for FreeBSD 10 and above
|
- freebsd for FreeBSD 10 and above
|
||||||
- dragonfly for DragonFlyBSD 4.3 and above
|
- dragonfly for DragonFlyBSD 4.3 and above
|
||||||
@@ -580,64 +546,29 @@ and assign it to the TARGET variable :
   - generic            for any other OS or version.
   - custom             to manually adjust every setting

-Example:
-    $ make -j $(nproc) TARGET=linux-glibc
-
-AIX 5.3 is known to work with the generic target. However, for the binary to
-also run on 5.2 or earlier, you need to build with DEFINE="-D_MSGQSUPPORT",
-otherwise __fd_select() will be used while not being present in the libc, but
-this is easily addressed using the "aix52" target. If you get build errors
-because of strange symbols or section mismatches, simply remove -g from
-ARCH_FLAGS.
-
-Building on AIX 7.2 works fine using the "aix72-gcc" TARGET. It adds two
-special CFLAGS to prevent the loading of AIX's xmem.h and var.h. This is done
-by defining the corresponding include-guards _H_XMEM and _H_VAR. Without
-excluding those header-files the build fails because of redefinition errors.
-Furthermore, the atomic library is added to the LDFLAGS to allow for
-multithreading via USE_THREAD.
-
-You can easily define your own target with the GNU Makefile. Unknown targets
-are processed with no default option except USE_POLL=default. So you can very
-well use that property to define your own set of options. USE_POLL and USE_SLZ
-can even be disabled by setting them to an empty string or a zero. For
-example :
-
-    $ gmake TARGET=tiny USE_POLL="" USE_SLZ=0 TARGET_CFLAGS=-fomit-frame-pointer
-
-
-5.2) Adding extra CFLAGS for compiling
---------------------------------------
-A generic CFLAGS variable may be set to append any option to pass to the C
-compiler. These flags are passed last so the variable may be used to override
-other options such as warnings, optimization levels, include paths etc.
-
-A default optimization level of -O2 is set by variable OPT_CFLAGS which may be
-overridden if desired. It's used early in the list of CFLAGS so that any other
-set of CFLAGS providing a different value may easily override it.
-
-Some platforms may benefit from some CPU-specific options that will enable
-certain instruction sets, word size or endianness for example. One of them is
-the common "-march=native" that indicates to modern compilers that they need to
-optimize for the machine the compiler is running on. Such options may be either
-passed in the CPU_CFLAGS or in the CFLAGS variable, either will work though
-one may be more convenient for certain methods of packaging and the other one
-for other methods. Among the many possible options, the following ones are
-known for having successfully been used:
-
-  - "-march=native" for a native build
-  - "-march=armv8-a+crc" for older ARM Cortex A53/A72/A73 (such as RPi 3B/4B)
-  - "-march=armv8.1-a" for modern ARM Cortex A55/A76, Graviton2+, RPi 5
-  - "-march=armv8-a+crc -moutline-atomics" to support older ARM with better
-    support of modern cores with gcc-10+
-  - "-mavx", "-mavx2", "-mavx512", to enable certain x86 SIMD instruction sets
-  - "-march=i586" to support almost all 32-bit x86 systems
-  - "-march=i686" to support only the latest 32-bit x86 systems
-  - "-march=i386" to support even the oldest 32-bit x86 systems
-  - "-mlittle-endian -march=armv5te" for some little-endian ARMv5 systems
-  - "-mcpu=v9 -mtune=ultrasparc -m64" for a 64-bit Solaris SPARC build
-  - "-march=1004kc -mtune=1004kc" for some multi-core 32-bit MIPS 1004Kc
-  - "-march=24kc -mtune=24kc" for some single-core 32-bit MIPS 24Kc
+You may also choose your CPU to benefit from some optimizations. This is
+particularly important on UltraSparc machines. For this, you can assign
+one of the following choices to the CPU variable :
+
+  - i686 for intel PentiumPro, Pentium 2 and above, AMD Athlon (32 bits)
+  - i586 for intel Pentium, AMD K6, VIA C3.
+  - ultrasparc : Sun UltraSparc I/II/III/IV processor
+  - power8 : IBM POWER8 processor
+  - power9 : IBM POWER9 processor
+  - armv81 : modern ARM cores (Cortex A55/A75/A76/A78/X1, Neoverse, Graviton2)
+  - a72 : ARM Cortex-A72 or A73 (e.g. RPi4, Odroid N2, AWS Graviton)
+  - a53 : ARM Cortex-A53 or any of its successors in 64-bit mode (e.g. RPi3)
+  - armv8-auto : support both older and newer armv8 cores with a minor penalty,
+    thanks to gcc 10's outline atomics (default with gcc 10.2).
+  - native : use the build machine's specific processor optimizations. Use with
+    extreme care, and never in virtualized environments (known to break).
+  - generic : any other processor or no CPU-specific optimization. (default)
+
+Alternatively, you may just set the CPU_CFLAGS value to the optimal GCC options
+for your platform. A second variable named SMALL_OPTS also supports passing a
+number of defines and compiler options usually for small systems. For better
+clarity it's recommended to pass the options which result in a smaller binary
+(like memory limits or -Os) into this variable.

 If you are building for a different system than the one you're building on,
 this is called "cross-compiling". HAProxy supports cross-compilation pretty
|
|||||||
details again. It is recommended to use this option when cross-compiling to
|
details again. It is recommended to use this option when cross-compiling to
|
||||||
verify that the paths are correct and that /usr/include is never involved.
|
verify that the paths are correct and that /usr/include is never involved.
|
||||||
|
|
||||||
If you need to pass some defines to the preprocessor or compiler, you may pass
|
You may want to build specific target binaries which do not match your native
|
||||||
them all in the DEFINE variable. Example:
|
compiler's target. This is particularly true on 64-bit systems when you want
|
||||||
|
to build a 32-bit binary. Use the ARCH variable for this purpose. Right now
|
||||||
$ make TARGET=generic DEFINE="-DDEBUG_DONT_SHARE_POOLS"
|
it only knows about a few x86 variants (i386,i486,i586,i686,x86_64), two
|
||||||
|
generic ones (32,64) and sets -m32/-m64 as well as -march=<arch> accordingly.
|
||||||
The ADDINC variable may be used to add some extra include paths; this is
|
This variable is only used to set ARCH_FLAGS to preset values, so if you know
|
||||||
sometimes needed when cross-compiling. Similarly the ADDLIB variable may be
|
the arch-specific flags that your system needs, you may prefer to set
|
||||||
used to specify extra paths to library files. Example :
|
ARCH_FLAGS instead. Note that these flags are passed both to the compiler and
|
||||||
|
to the linker. For example, in order to build a 32-bit binary on an x86_64
|
||||||
$ make TARGET=generic ADDINC=-I/opt/cross/include ADDLIB=-L/opt/cross/lib64
|
Linux system with SSL support without support for compression but when OpenSSL
|
||||||
|
|
||||||
|
|
||||||
5.3) Adding extra LDFLAGS for linking
|
|
||||||
-------------------------------------
|
|
||||||
If a particular target requires specific link-time flags, these can be passed
|
|
||||||
via the LDFLAGS variable. This variable is passed to the linker immediately
|
|
||||||
after ARCH_FLAGS. One of the common use cases is to add some run time search
|
|
||||||
paths for a dynamic library that's not part of the default system search path:
|
|
||||||
|
|
||||||
$ make -j $(nproc) TARGET=generic USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
|
||||||
SSL_INC=/opt/aws-lc/include SSL_LIB=/opt/aws-lc/lib \
|
|
||||||
LDFLAGS="-Wl,-rpath,/opt/aws-lc/lib"
|
|
||||||
|
|
||||||
Some options require to be consistent between the compilation stage and the
|
|
||||||
linking stage. This is the case for options which enable debugging (e.g. "-g"),
|
|
||||||
profiling ("-pg"), link-time optimization ("-flto"), endianness ("-EB", "-EL"),
|
|
||||||
bit width ("-m32", "-m64"), or code analyzers ("-fsanitize=address"). These
|
|
||||||
options can be passed via the ARCH_FLAGS variable, which will be used at both
|
|
||||||
stages during the build process, thus avoiding the risk of inconsistencies. By
|
|
||||||
default, ARCH_FLAGS only contains "-g" to enable the generation of debug
|
|
||||||
symbols. For example, in order to build a 32-bit binary on an x86_64 Linux
|
|
||||||
system with SSL support without support for compression but when OpenSSL
|
|
||||||
requires ZLIB anyway :
|
requires ZLIB anyway :
|
||||||
|
|
||||||
$ make TARGET=linux-glibc ARCH_FLAGS="-m32 -g" USE_OPENSSL=1 ADDLIB=-lz
|
$ make TARGET=linux-glibc ARCH=i386 USE_OPENSSL=1 ADDLIB=-lz
|
||||||
|
|
||||||
and building with the address sanitizer (ASAN) simply requires:
|
|
||||||
|
|
||||||
$ make TARGET=linux-glibc ARCH_FLAGS="-fsanitize=address -g"
|
|
||||||
|
|
||||||
|
|
||||||
5.4) Other common OS-specific options
|
|
||||||
-------------------------------------
|
|
||||||
Recent systems can resolve IPv6 host names using getaddrinfo(). This primitive
|
Recent systems can resolve IPv6 host names using getaddrinfo(). This primitive
|
||||||
is not present in all libcs and does not work in all of them either. Support in
|
is not present in all libcs and does not work in all of them either. Support in
|
||||||
glibc was broken before 2.3. Some embedded libs may not properly work either,
|
glibc was broken before 2.3. Some embedded libs may not properly work either,
|
||||||
@ -724,63 +626,16 @@ section 4 about dependencies for more information on how to build with OpenSSL.
|
|||||||
HAProxy can compress HTTP responses to save bandwidth. Please see section 4
|
HAProxy can compress HTTP responses to save bandwidth. Please see section 4
|
||||||
about dependencies to see the available libraries and associated options.
|
about dependencies to see the available libraries and associated options.
|
||||||
|
|
||||||
If you need to pass other defines, includes, libraries, etc... then please
|
By default, the DEBUG_CFLAGS variable is set to '-g' to enable debug symbols.
|
||||||
check the Makefile to see which ones will be available in your case, and
|
It is not wise to disable it on uncommon systems, because it's often the only
|
||||||
use/override the USE_* variables from the Makefile.
|
way to get a usable core when you need one. Otherwise, you can set DEBUG to
|
||||||
|
'-s' to strip the binary.
|
||||||
|
|
||||||
|
If the ERR variable is set to any non-empty value, then -Werror will be added
|
||||||
|
to the compiler so that any build warning will trigger an error. This is the
|
||||||
|
recommended way to build when developing, and it is expected that contributed
|
||||||
|
patches were tested with ERR=1.
|
||||||
|
|
||||||
5.5) Adjusting the build error / warning behavior
|
|
||||||
-------------------------------------------------
|
|
||||||
If the ERR variable is set to any non-empty value other than "0", then -Werror
|
|
||||||
will be added to the compiler so that any build warning will trigger an error.
|
|
||||||
This is the recommended way to build when developing, and it is expected that
|
|
||||||
contributed patches were tested with ERR=1. Similarly, for developers, another
|
|
||||||
variable, FAILFAST enables -Wfatal-errors when set to non-empty except 0, and
|
|
||||||
makes the compiler stop at the first error instead of scrolling pages. It's
|
|
||||||
essentially a matter of taste.
|
|
||||||
|
|
||||||
Packagers who want to achieve the cleanest warning-free builds may be
|
|
||||||
interested in knowing that all enabled warnings are normally placed into
|
|
||||||
the WARN_CFLAGS variable. The variable contains a list of pre-established
|
|
||||||
warnings and a list of some that are dynamically detected on the compiler.
|
|
||||||
If the build environment or toolchain doesn't even support some of the basic
|
|
||||||
ones, it is then possible to just redefine them by passing the main ones in
|
|
||||||
WARN_CFLAGS (e.g. at the very least -W -Wall). Similarly, it may sometimes
|
|
||||||
be desirable not to disable certain warnings when porting to new platforms
|
|
||||||
or during code audits, or simply because the toolchain doesn't support some
|
|
||||||
of the most basic -Wno options. In this case, the list of automatic -Wno
|
|
||||||
variables is specified by variable NOWARN_CFLAGS, which is passed after
|
|
||||||
WARN_CFLAGS (i.e. it can undo some of the WARN_CFLAGS settings). Be careful
|
|
||||||
with it, as clearing this list can yield many warnings depending on the
|
|
||||||
compiler and options.
|
|
||||||
|
|
||||||
The DEP variable is automatically set to the list of include files and also
|
|
||||||
designates a file that contains the last build options used. It is used during
|
|
||||||
the build process to compute dependencies and decide whether or not to rebuild
|
|
||||||
everything (we do rebuild everything when .h files are touched or when build
|
|
||||||
options change). Sometimes when performing fast build iterations on inline
|
|
||||||
functions it may be desirable to avoid a full rebuild. Forcing this variable
|
|
||||||
to be empty will be sufficient to achieve this. This variable must never be
|
|
||||||
forced to produce final binaries, and must not be used during bisect sessions,
|
|
||||||
as it will often lead to the wrong commit.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
# silence strict-aliasing warnings with old gcc-5.5:
|
|
||||||
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-55 CFLAGS=-fno-strict-aliasing
|
|
||||||
|
|
||||||
# disable all warning options:
|
|
||||||
$ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=
|
|
||||||
|
|
||||||
# enable -Werror and -Wfatal-errors to immediately stop on error
|
|
||||||
$ make -j$(nproc) TARGET=linux-glibc ERR=1 FAILFAST=1
|
|
||||||
|
|
||||||
# try to restart the build where it was after hacking an include file, to
|
|
||||||
# check if that was sufficient or not:
|
|
||||||
$ make -j$(nproc) TARGET=linux-glibc ERR=1 DEP=
|
|
||||||
|
|
||||||
|
|
||||||
5.6) Enabling a DEBUG build
|
|
||||||
---------------------------
|
|
||||||
The DEBUG variable is used to extend the CFLAGS and is preset to a list of
|
The DEBUG variable is used to extend the CFLAGS and is preset to a list of
|
||||||
build-time options that are known for providing significant reliability
|
build-time options that are known for providing significant reliability
|
||||||
improvements and a barely perceptible performance cost. Unless instructed to do
|
improvements and a barely perceptible performance cost. Unless instructed to do
|
||||||
@ -791,8 +646,8 @@ these options should not be changed. Among the usable ones are:
|
|||||||
conditions are not met, and whose violation will result in a misbehaving
|
conditions are not met, and whose violation will result in a misbehaving
|
||||||
process due to memory corruption or other significant trouble, possibly
|
process due to memory corruption or other significant trouble, possibly
|
||||||
caused by an attempt to exploit a bug in the program or a library it relies
|
caused by an attempt to exploit a bug in the program or a library it relies
|
||||||
on. The option knows 3 values: 0 (disable all such assertions, not
|
on. The option knows 3 values: 0 (disable all such assertions, the default
|
||||||
recommended), 1 (enable all inexpensive assertions, the default), and
|
when the option is not set), 1 (enable all inexpensive assertions), and
|
||||||
2 (enable all assertions even in fast paths). Setting the option with no
|
2 (enable all assertions even in fast paths). Setting the option with no
|
||||||
value corresponds to 1, which is the recommended value for production.
|
value corresponds to 1, which is the recommended value for production.
|
||||||
|
|
||||||
@ -824,7 +679,7 @@ these options should not be changed. Among the usable ones are:
|
|||||||
overflows, which may have security implications. The cost is extremely low
|
overflows, which may have security implications. The cost is extremely low
|
||||||
(less than 1% increase in memory footprint). This is equivalent to adding
|
(less than 1% increase in memory footprint). This is equivalent to adding
|
||||||
"-dMtag" on the command line. This option is enabled in the default build
|
"-dMtag" on the command line. This option is enabled in the default build
|
||||||
options and may be disabled with -DDEBUG_MEMORY_POOLS=0.
|
options.
|
||||||
|
|
||||||
- -DDEBUG_DONT_SHARE_POOLS: this will keep separate pools for same-sized
|
- -DDEBUG_DONT_SHARE_POOLS: this will keep separate pools for same-sized
|
||||||
objects of different types. Using this increases the memory usage a little
|
objects of different types. Using this increases the memory usage a little
|
||||||
@ -844,34 +699,58 @@ these options should not be changed. Among the usable ones are:
|
|||||||
are encouraged to use it, in combination with -DDEBUG_DONT_SHARE_POOLS and
|
are encouraged to use it, in combination with -DDEBUG_DONT_SHARE_POOLS and
|
||||||
-DDEBUG_MEMORY_POOLS, as this could catch dangerous regressions.
|
-DDEBUG_MEMORY_POOLS, as this could catch dangerous regressions.
|
||||||
|
|
||||||
As such, "-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS" is implicit and recommended for
|
As such, for regular production, "-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS" is
|
||||||
production. For security sensitive environments, it is recommended to use
|
recommended. For security sensitive environments, it is recommended to use
|
||||||
"-DDEBUG_STRICT_ACTION=2 -DDEBUG_DONT_SHARE_POOLS". When testing new versions
|
"-DDEBUG_STRICT -DDEBUG_STRICT_ACTION=2 -DDEBUG_MEMORY_POOLS \
|
||||||
or trying to nail a bug down, use "-DDEBUG_STRICT=2 -DDEBUG_STRICT_ACTION=2 \
|
-DDEBUG_DONT_SHARE_POOLS". For deployments dedicated to testing new versions or
|
||||||
-DDEBUG_DONT_SHARE_POOLS -DDEBUG_POOL_INTEGRITY". Finally in order to minimize
|
when trying to nail a bug down, use "-DDEBUG_STRICT=2 -DDEBUG_STRICT_ACTION=2 \
|
||||||
memory usage by disabling these integrity features, it is also possible to use
|
-DDEBUG_MEMORY_POOLS -DDEBUG_DONT_SHARE_POOLS -DDEBUG_POOL_INTEGRITY".
|
||||||
"-DDEBUG_STRICT=0 -DDEBUG_MEMORY_POOLS=0".
|
|
||||||
|
|
||||||
|
The DEP variable is automatically set to the list of include files and also
|
||||||
|
designates a file that contains the last build options used. It is used during
|
||||||
|
-the build process to compute dependencies and decide whether or not to rebuild
-everything (we do rebuild everything when .h files are touched or when build
-options change). Sometimes when performing fast build iterations on inline
-functions it may be desirable to avoid a full rebuild. Forcing this variable
-to be empty will be sufficient to achieve this. This variable must never be
-forced to produce final binaries, and must not be used during bisect sessions,
-as it will often lead to the wrong commit.
-
-5.7) Summary of the Makefile's main variables
----------------------------------------------
-
-The following variables are commonly used:
-
-  - TARGET          platform name, empty by default, see help
-  - CC              path to the C compiler, defaults to "cc"
-  - LD              path to the linker, defaults to "$CC"
-  - CFLAGS          CFLAGS to append at the end, empty by default
-  - LDFLAGS         LDFLAGS to append at the end, empty by default
-  - ARCH_FLAGS      flags common to CC and LD (-fsanitize, etc). Defaults to "-g"
-  - OPT_CFLAGS      C compiler optimization level. Defaults to "-O2"
-  - WARN_CFLAGS     list of autodetected C compiler warnings to enable
-  - NOWARN_CFLAGS   list of autodetected C compiler warnings to disable
-  - ADDINC          include directives to append at the end, empty by default
-  - ADDLIB          lib directives to append at the end, empty by default
-  - DEFINE          extra macros definitions for compiler, empty by default
-  - DEBUG           extra DEBUG options for compiler, empty by default
-  - ERR             enables -Werror if non-zero, empty by default
-  - FAILFAST        enables -Wfatal-error if non-zero, empty by default
+If you need to pass other defines, includes, libraries, etc... then please
+check the Makefile to see which ones will be available in your case, and
+use/override the USE_* variables from the Makefile.
+
+AIX 5.3 is known to work with the generic target. However, for the binary to
+also run on 5.2 or earlier, you need to build with DEFINE="-D_MSGQSUPPORT",
+otherwise __fd_select() will be used while not being present in the libc, but
+this is easily addressed using the "aix52" target. If you get build errors
+because of strange symbols or section mismatches, simply remove -g from
+DEBUG_CFLAGS.
+
+Building on AIX 7.2 works fine using the "aix72-gcc" TARGET. It adds two
+special CFLAGS to prevent the loading of AIX's xmem.h and var.h. This is done
+by defining the corresponding include-guards _H_XMEM and _H_VAR. Without
+excluding those header-files the build fails because of redefinition errors.
+Furthermore, the atomic library is added to the LDFLAGS to allow for
+multithreading via USE_THREAD.
+
+You can easily define your own target with the GNU Makefile. Unknown targets
+are processed with no default option except USE_POLL=default. So you can very
+well use that property to define your own set of options. USE_POLL and USE_SLZ
+can even be disabled by setting them to an empty string. For example :
+
+    $ gmake TARGET=tiny USE_POLL="" USE_SLZ="" TARGET_CFLAGS=-fomit-frame-pointer
+
+If you need to pass some defines to the preprocessor or compiler, you may pass
+them all in the DEFINE variable. Example:
+
+    $ make TARGET=generic DEFINE="-DDEBUG_DONT_SHARE_POOLS -DDEBUG_MEMORY_POOLS"
+
+The ADDINC variable may be used to add some extra include paths; this is
+sometimes needed when cross-compiling. Similarly the ADDLIB variable may be
+used to specify extra paths to library files. Example :
+
+    $ make TARGET=generic ADDINC=-I/opt/cross/include ADDLIB=-L/opt/cross/lib64
 6) How to install HAProxy

README (new file, 22 lines)
@@ -0,0 +1,22 @@
+The HAProxy documentation has been split into a number of different files for
+ease of use.
+
+Please refer to the following files depending on what you're looking for :
+
+  - INSTALL for instructions on how to build and install HAProxy
+  - BRANCHES to understand the project's life cycle and what version to use
+  - LICENSE for the project's license
+  - CONTRIBUTING for the process to follow to submit contributions
+
+The more detailed documentation is located into the doc/ directory :
+
+  - doc/intro.txt for a quick introduction on HAProxy
+  - doc/configuration.txt for the configuration's reference manual
+  - doc/lua.txt for the Lua's reference manual
+  - doc/SPOE.txt for how to use the SPOE engine
+  - doc/network-namespaces.txt for how to use network namespaces under Linux
+  - doc/management.txt for the management guide
+  - doc/regression-testing.txt for how to use the regression testing suite
+  - doc/peers.txt for the peers protocol reference
+  - doc/coding-style.txt for how to adopt HAProxy's coding style
+  - doc/internals for developer-specific documentation (not all up to date)
README.md (deleted file, 62 lines)
@@ -1,62 +0,0 @@
-# HAProxy
-
-[](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
-[](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
-[](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml)
-[](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
-[](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
-[](https://cirrus-ci.com/github/haproxy/haproxy/)
-[](https://github.com/haproxy/haproxy/actions/workflows/vtest.yml)
-
-
-
-HAProxy is a free, very fast and reliable reverse-proxy offering high availability, load balancing, and proxying for TCP
-and HTTP-based applications.
-
-## Installation
-
-The [INSTALL](INSTALL) file describes how to build HAProxy.
-A [list of packages](https://github.com/haproxy/wiki/wiki/Packages) is also available on the wiki.
-
-## Getting help
-
-The [discourse](https://discourse.haproxy.org/) and the [mailing-list](https://www.mail-archive.com/haproxy@formilux.org/)
-are available for questions or configuration assistance. You can also use the [slack](https://slack.haproxy.org/) or
-[IRC](irc://irc.libera.chat/%23haproxy) channel. Please don't use the issue tracker for these.
-
-The [issue tracker](https://github.com/haproxy/haproxy/issues/) is only for bug reports or feature requests.
-
-## Documentation
-
-The HAProxy documentation has been split into a number of different files for
-ease of use. It is available in text format as well as HTML. The wiki is also meant to replace the old architecture
-guide.
-
-- [HTML documentation](http://docs.haproxy.org/)
-- [HTML HAProxy LUA API Documentation](https://www.arpalert.org/haproxy-api.html)
-- [Wiki](https://github.com/haproxy/wiki/wiki)
-
-Please refer to the following files depending on what you're looking for:
-
-- [INSTALL](INSTALL) for instructions on how to build and install HAProxy
-- [BRANCHES](BRANCHES) to understand the project's life cycle and what version to use
-- [LICENSE](LICENSE) for the project's license
-- [CONTRIBUTING](CONTRIBUTING) for the process to follow to submit contributions
-
-The more detailed documentation is located into the doc/ directory:
-
-- [ doc/intro.txt ](doc/intro.txt) for a quick introduction on HAProxy
-- [ doc/configuration.txt ](doc/configuration.txt) for the configuration's reference manual
-- [ doc/lua.txt ](doc/lua.txt) for the Lua's reference manual
-- [ doc/SPOE.txt ](doc/SPOE.txt) for how to use the SPOE engine
-- [ doc/network-namespaces.txt ](doc/network-namespaces.txt) for how to use network namespaces under Linux
-- [ doc/management.txt ](doc/management.txt) for the management guide
-- [ doc/regression-testing.txt ](doc/regression-testing.txt) for how to use the regression testing suite
-- [ doc/peers.txt ](doc/peers.txt) for the peers protocol reference
-- [ doc/coding-style.txt ](doc/coding-style.txt) for how to adopt HAProxy's coding style
-- [ doc/internals ](doc/internals) for developer-specific documentation (not all up to date)
-
-## License
-
-HAProxy is licensed under [GPL 2](doc/gpl.txt) or any later version, the headers under [LGPL 2.1](doc/lgpl.txt). See the
-[LICENSE](LICENSE) file for a more detailed explanation.
addons/deviceatlas/Makefile (new file, 48 lines)
@@ -0,0 +1,48 @@
+# DEVICEATLAS_SRC : DeviceAtlas API source root path
+
+
+OS := $(shell uname -s)
+OBJS := dadwsch.o
+CFLAGS := -g -O2
+LDFLAGS :=
+
+CURL_CONFIG := curl-config
+CURLDIR := $(shell $(CURL_CONFIG) --prefix 2>/dev/null || echo /usr/local)
+CURL_INC := $(CURLDIR)/include
+CURL_LIB := $(CURLDIR)/lib
+CURL_LDFLAGS := $(shell $(CURL_CONFIG) --libs 2>/dev/null || echo -L /usr/local/lib -lcurl)
+
+PCRE2_CONFIG := pcre2-config
+PCRE2DIR := $(shell $(PCRE2_CONFIG) --prefix 2>/dev/null || echo /usr/local)
+PCRE2_INC := $(PCRE2DIR)/include
+PCRE2_LIB := $(PCRE2DIR)/lib
+PCRE2_LDFLAGS := $(shell $(PCRE2_CONFIG) --libs8 2>/dev/null || echo /usr/local)
+
+ifeq ($(DEVICEATLAS_SRC),)
+dadwsch: dadwsch.c
+	$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
+
+LDFLAGS += -lda
+else
+DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
+DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
+CFLAGS += -DDA_REGEX_HDR=\"dac_pcre2.c\" -DDA_REGEX_TAG=2
+CFLAGS += -DMOBI_CURL -DMOBI_CURLSSET -DMOBI_GZ -DMOBI_ZIP
+CFLAGS += -I$(DEVICEATLAS_INC) -I$(CURL_INC) -I$(PCRE2DIR)
+LDFLAGS += $(CURL_LDFLAGS) $(PCRE2_LDFLAGS) -lz -lzip -lpthread
+
+dadwsch: dadwsch.c $(DEVICEATLAS_SRC)/dac.c $(DEVICEATLAS_SRC)/dasch.c $(DEVICEATLAS_SRC)/dadwarc.c $(DEVICEATLAS_SRC)/dadwcom.c $(DEVICEATLAS_SRC)/dadwcurl.c $(DEVICEATLAS_SRC)/json.c $(DEVICEATLAS_SRC)/Os/daunix.c
+	$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
+endif
+
+ifeq ($(OS), Linux)
+LDFLAGS += -lrt
+endif
+ifeq ($(OS), SunOS)
+LDFLAGS += -lrt
+endif
+
+clean:
+	rm -f *.o
+	rm -f $(DEVICEATLAS_LIB)*.o
+	rm -f dadwsch
@@ -1,32 +0,0 @@
-# DEVICEATLAS_SRC : DeviceAtlas API source root path
-
-
-CXX := c++
-CXXLIB := -lstdc++
-
-ifeq ($(DEVICEATLAS_SRC),)
-OPTIONS_CFLAGS  += -I$(DEVICEATLAS_INC)
-OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
-else
-DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
-DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
-OPTIONS_LDFLAGS += -lpthread
-OPTIONS_CFLAGS  += -I$(DEVICEATLAS_INC)
-ifeq ($(DEVICEATLAS_NOCACHE),)
-CXXFLAGS := $(OPTIONS_CFLAGS) -std=gnu++11
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/dacache.o
-OPTIONS_LDFLAGS += $(CXXLIB)
-else
-OPTIONS_CFLAGS += -DAPINOCACHE
-endif
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/dac.o
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/json.o
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/dasch.o
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/dadwarc.o
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/dadwcom.o
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/dadwcurl.o
-OPTIONS_OBJS += $(DEVICEATLAS_SRC)/Os/daunix.o
-endif
-
-addons/deviceatlas/dummy/%.o: addons/deviceatlas/dummy/%.cpp
-	$(cmd_CXX) $(CXXFLAGS) -c -o $@ $<
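A note on the two Makefile versions above: the v2.9.0 one ("+") builds the standalone dadwsch download scheduler, while the master-side one ("-") instead wires the DeviceAtlas objects into HAProxy's own build through the OPTIONS_* variables. Assuming the DeviceAtlas API sources are unpacked locally (the path below is illustrative, not from the diff), the scheduler would be built with something like:

    $ make -C addons/deviceatlas DEVICEATLAS_SRC=/opt/deviceatlas/Src dadwsch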
@@ -18,7 +18,7 @@
 #include <dac.h>
 
 #define ATLASTOKSZ PATH_MAX
-#define ATLASMAPNM "/da_map_sch_data"
+#define ATLASMAPNM "/hapdeviceatlas"
 
 static struct {
     void *atlasimgptr;
@@ -26,7 +26,6 @@ static struct {
     char *jsonpath;
     char *cookiename;
     size_t cookienamelen;
-    size_t cachesize;
     int atlasfd;
     da_atlas_t atlas;
     da_evidence_id_t useragentid;
@@ -38,7 +37,6 @@ static struct {
     .jsonpath = 0,
     .cookiename = 0,
     .cookienamelen = 0,
-    .cachesize = 0,
     .atlasmap = NULL,
     .atlasfd = -1,
     .useragentid = 0,
@@ -106,29 +104,6 @@ static int da_properties_cookie(char **args, int section_type, struct proxy *curpx,
     return 0;
 }
 
-static int da_cache_size(char **args, int section_type, struct proxy *curpx,
-                         const struct proxy *defpx, const char *file, int line,
-                         char **err)
-{
-    int cachesize;
-    if (*(args[1]) == 0) {
-        memprintf(err, "deviceatlas cache size : expects an integer argument.\n");
-        return -1;
-    }
-
-    cachesize = atol(args[1]);
-    if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
-        memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
-    } else {
-#ifdef APINOCACHE
-        fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
-#endif
-        global_deviceatlas.cachesize = (size_t)cachesize;
-    }
-
-    return 0;
-}
-
 static size_t da_haproxy_read(void *ctx, size_t len, char *buf)
 {
     return fread(buf, 1, len, ctx);
@@ -193,8 +168,6 @@ static int init_deviceatlas(void)
         goto out;
     }
 
-    global_deviceatlas.atlas.config.cache_size = global_deviceatlas.cachesize;
-
     if (global_deviceatlas.cookiename == 0) {
         global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
         global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
@@ -249,57 +222,48 @@ static void da_haproxy_checkinst(void)
     base = (char *)global_deviceatlas.atlasmap;
 
     if (base[0] != 0) {
-        FILE *jsonp;
         void *cnew;
-        da_status_t status;
         size_t atlassz;
         char atlasp[ATLASTOKSZ] = {0};
         da_atlas_t inst;
         da_property_decl_t extraprops[1] = {{NULL, 0}};
+
 #ifdef USE_THREAD
         HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
 #endif
-        strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
-        jsonp = fopen(atlasp, "r");
-        if (jsonp == 0) {
-            ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
-                     atlasp);
-#ifdef USE_THREAD
-            HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
-#endif
-            return;
-        }
-
-        status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
-                                  &cnew, &atlassz);
-        fclose(jsonp);
-        if (status == DA_OK) {
-            if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
-                da_atlas_close(&global_deviceatlas.atlas);
-                free(global_deviceatlas.atlasimgptr);
-                global_deviceatlas.atlasimgptr = cnew;
-                global_deviceatlas.atlas = inst;
-                base[0] = 0;
-                ha_notice("deviceatlas : new instance, data file date `%s`.\n",
-                          da_getdatacreationiso8601(&global_deviceatlas.atlas));
-            } else {
-                ha_alert("deviceatlas : instance update failed.\n");
-                free(cnew);
-            }
-        }
+        strlcpy2(atlasp, base, sizeof(atlasp));
+        if (da_atlas_read_mapped(atlasp, NULL, &cnew, &atlassz) == DA_OK) {
+            if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
+                char jsonbuf[26];
+                time_t jsond;
+
+                da_atlas_close(&global_deviceatlas.atlas);
+                free(global_deviceatlas.atlasimgptr);
+                global_deviceatlas.atlasimgptr = cnew;
+                global_deviceatlas.atlas = inst;
+                memset(base, 0, ATLASTOKSZ);
+                jsond = da_getdatacreation(&global_deviceatlas.atlas);
+                ctime_r(&jsond, jsonbuf);
+                jsonbuf[24] = 0;
+                printf("deviceatlas: new instance, data file date `%s`.\n", jsonbuf);
+            } else {
+                ha_warning("deviceatlas: instance update failed.\n");
+                memset(base, 0, ATLASTOKSZ);
+                free(cnew);
+            }
+        }
#ifdef USE_THREAD
         HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
 #endif
     }
 }
 
 static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
 {
     struct buffer *tmp;
     da_propid_t prop, *pprop;
     da_status_t status;
     da_type_t proptype;
     const char *propname;
     int i;
 
@@ -499,7 +463,6 @@ static struct cfg_kw_list dacfg_kws = {{ }, {
     { CFG_GLOBAL, "deviceatlas-log-level", da_log_level },
     { CFG_GLOBAL, "deviceatlas-property-separator", da_property_separator },
     { CFG_GLOBAL, "deviceatlas-properties-cookie", da_properties_cookie },
-    { CFG_GLOBAL, "deviceatlas-cache-size", da_cache_size },
     { 0, NULL, NULL },
 }};
 
@@ -523,10 +486,10 @@ static void da_haproxy_register_build_options()
 {
     char *ptr = NULL;
 
-#ifdef DATLAS_DA_DUMMY_LIBRARY
+#ifdef MOBI_DA_DUMMY_LIBRARY
     memprintf(&ptr, "Built with DeviceAtlas support (dummy library only).");
 #else
-    memprintf(&ptr, "Built with DeviceAtlas support (library version %u.%u).", DATLAS_DA_MAJOR, DATLAS_DA_MINOR);
+    memprintf(&ptr, "Built with DeviceAtlas support (library version %u.%u).", MOBI_DA_MAJOR, MOBI_DA_MINOR);
 #endif
     hap_register_build_opts(ptr, 1);
 }
addons/deviceatlas/dadwsch.c (new file, 195 lines)
@@ -0,0 +1,195 @@
+#define _GNU_SOURCE
+#include <dac.h>
+#include <dadwcurl.h>
+#include <dadwarc.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#define ATLASTOKSZ PATH_MAX
+#define ATLASMAPNM "/hapdeviceatlas"
+
+const char *__pgname;
+
+static struct {
+    da_dwatlas_t o;
+    int ofd;
+    void* atlasmap;
+} global_deviceatlassch = {
+    .ofd = -1,
+    .atlasmap = NULL
+};
+
+
+void usage(void)
+{
+    fprintf(stderr, "%s -u download URL [-d hour (in H:M:S format) current hour by default] [-p path for the downloaded file, /tmp by default]\n", __pgname);
+    exit(EXIT_FAILURE);
+}
+
+static size_t jsonread(void *ctx, size_t count, char *buf)
+{
+    return fread(buf, 1, count, ctx);
+}
+
+static da_status_t jsonseek(void *ctx, off_t pos)
+{
+    return fseek(ctx, pos, SEEK_SET) != -1 ? DA_OK : DA_SYS;
+}
+
+static void dadwlog(dw_config_t cfg, const char* msg)
+{
+    time_t now = time(NULL);
+    char buf[26] = {0};
+    ctime_r(&now, buf);
+    buf[24] = 0;
+    fprintf(stderr, "%s: %s\n", buf, msg);
+}
+
+static dw_status_t dadwnot(void *a, dw_config_t *cfg)
+{
+    da_dwatlas_t *o = (da_dwatlas_t *)a;
+    if (!o)
+        return DW_ERR;
+    char *e;
+    char jsondbuf[26] = {0}, buf[26] = {0}, atlasp[ATLASTOKSZ] = {0};
+    time_t now = time(NULL);
+    time_t jsond;
+    int fd = -1;
+    (void)a;
+    jsond = da_getdatacreation(&o->atlas);
+    dwgetfinalp(o->dcfg.info, atlasp, sizeof(atlasp));
+    ctime_r(&jsond, jsondbuf);
+    ctime_r(&now, buf);
+    jsondbuf[24] = 0;
+    buf[24] = 0;
+
+    printf("%s: data file generated on `%s`\n", buf, jsondbuf);
+    int val = 1;
+    unsigned char *ptr = (unsigned char *)global_deviceatlassch.atlasmap;
+    memset(ptr, 0, sizeof(atlasp));
+    strcpy(ptr, atlasp);
+    return DW_OK;
+}
+
+static da_status_t dadwinit(void)
+{
+    if ((global_deviceatlassch.ofd = shm_open(ATLASMAPNM, O_RDWR | O_CREAT, 0660)) == -1) {
+        fprintf(stderr, "%s\n", strerror(errno));
+        return DA_SYS;
+    }
+
+    if (ftruncate(global_deviceatlassch.ofd, ATLASTOKSZ) == -1) {
+        close(global_deviceatlassch.ofd);
+        return DA_SYS;
+    }
+    lseek(global_deviceatlassch.ofd, 0, SEEK_SET);
+    global_deviceatlassch.atlasmap = mmap(0, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlassch.ofd, 0);
+    if (global_deviceatlassch.atlasmap == MAP_FAILED) {
+        fprintf(stderr, "%s\n", strerror(errno));
+        return DA_SYS;
+    } else {
+        memset(global_deviceatlassch.atlasmap, 0, ATLASTOKSZ);
+        return DA_OK;
+    }
+}
+
+static void dadwexit(int sig __attribute__((unused)), siginfo_t *s __attribute__((unused)), void *ctx __attribute__((unused)))
+{
+    ssize_t w;
+
+    fprintf(stderr, "%s: exit\n", __pgname);
+    dw_daatlas_close(&global_deviceatlassch.o);
+    da_fini();
+    munmap(global_deviceatlassch.atlasmap, ATLASTOKSZ);
+    close(global_deviceatlassch.ofd);
+    shm_unlink(ATLASMAPNM);
+    exit(EXIT_SUCCESS);
+}
+
+int main(int argc, char **argv)
+{
+    const char *opts = "u:p:d:h";
+    bool dset = false;
+    size_t i;
+    int ch;
+
+    da_property_decl_t extraprops[1] = {
+        { 0, 0 }
+    };
+
+    __pgname = argv[0];
+
+    dw_df_dainit_fn = curldwinit;
+    dw_df_dacleanup_fn = curldwcleanup;
+
+    da_init();
+    memset(&global_deviceatlassch.o.dcfg, 0, sizeof(global_deviceatlassch.o.dcfg));
+    while ((ch = getopt(argc, argv, opts)) != -1) {
+        switch (ch) {
+        case 'u':
+            global_deviceatlassch.o.dcfg.info.url = strdup(optarg);
+            break;
+        case 'p':
+            global_deviceatlassch.o.dcfg.info.path = strdup(optarg);
+            break;
+        case 'd':
+            if (strptime(optarg, "%H:%M:%S", &global_deviceatlassch.o.dcfg.info.rtm) != NULL)
+                dset = true;
+            else
+                usage();
+            break;
+        case 'h':
+        default:
+            usage();
+        }
+    }
+
+    if (!dset) {
+        time_t now = time(NULL);
+        struct tm *cnow = gmtime(&now);
+        memcpy(&global_deviceatlassch.o.dcfg.info.rtm, cnow, offsetof(struct tm, tm_mday));
+    }
+
+    if (!global_deviceatlassch.o.dcfg.info.url)
+        usage();
+
+    struct sigaction sa;
+    memset(&sa, 0, sizeof(sa));
+    sa.sa_flags = SA_SIGINFO | SA_RESTART;
+    sa.sa_sigaction = dadwexit;
+
+    global_deviceatlassch.o.dcfg.info.datatm = 1;
+    global_deviceatlassch.o.dcfg.info.chksum = 1;
+    global_deviceatlassch.o.dcfg.info.reload = 1;
+    global_deviceatlassch.o.dcfg.info.tobin = 1;
+    global_deviceatlassch.o.dcfg.ep = extraprops;
+    global_deviceatlassch.o.dcfg.dwproc = curldwproc;
+    global_deviceatlassch.o.dcfg.dwextract = dadwextract;
+    global_deviceatlassch.o.dcfg.lptr = (void *)stderr;
+    global_deviceatlassch.o.dcfg.dwlog = &dadwlog;
+    global_deviceatlassch.o.dcfg.dwnotify_n = &dadwnot;
+    global_deviceatlassch.o.rfn = jsonread;
+    global_deviceatlassch.o.posfn = jsonseek;
+
+    if (dadwinit() != DA_OK) {
+        fprintf(stderr, "%s init failed\n", __pgname);
+        exit(EXIT_FAILURE);
+    }
+
+    if (da_atlas_open_schedule(&global_deviceatlassch.o) != DA_OK) {
+        fprintf(stderr, "%s scheduling failed\n", __pgname);
+        exit(EXIT_FAILURE);
+    }
+
+    sigaction(SIGINT, &sa, NULL);
+    sigaction(SIGQUIT, &sa, NULL);
+    sigaction(SIGTERM, &sa, NULL);
+
+    while (true) sleep(1);
+
+    return 0;
+}
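Usage sketch for the scheduler above (the URL is illustrative; the options are those parsed in main()):

    $ ./dadwsch -u https://example.com/deviceatlas.json -d 03:00:00 -p /tmp

Once a download completes, dadwnot() writes the final file path into the "/hapdeviceatlas" shared memory segment, where HAProxy's da_haproxy_checkinst() (see the da.c hunk earlier) picks it up and swaps in a fresh atlas instance.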
@@ -1,7 +1,7 @@
 # makefile for dummy DeviceAtlas library
 #
 # To enable the DeviceAtlas module support, the following are needed
-# make TARGET=<target> DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_DEVICEATLAS=1
+# make TARGET=<target> DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_PCRE=1 USE_DEVICEATLAS=1
 
 build: libda.a
 
@@ -37,21 +37,21 @@ da_typename(da_type_t fieldtype)
 }
 
 char *
-da_getdataversion(const da_atlas_t *atlas)
+da_getdataversion(da_atlas_t *atlas)
 {
     return "dummy library version 1.0";
 }
 
 time_t
-da_getdatacreation(const da_atlas_t *atlas)
+da_getdatacreation(da_atlas_t *atlas)
 {
     return time(NULL);
 }
 
-char *
-da_getdatacreationiso8601(const da_atlas_t *atlas)
+int
+da_getdatarevision(da_atlas_t *atlas)
 {
-    return "20000123T012345.678+0900";
+    return 1;
 }
 
 da_status_t
@@ -118,6 +118,11 @@ da_atlas_getpropcount(const da_atlas_t *atlas)
     return 1;
 }
 
+void
+da_atlas_setconfig(da_atlas_t *atlas, da_config_t *config)
+{
+}
+
 da_status_t
 da_searchv(const da_atlas_t *atlas, da_deviceinfo_t *result, da_evidence_t *evidence, size_t count)
 {
@@ -26,8 +26,9 @@ typedef int _Bool;
 #endif
 #endif
 
-#define DATLAS_DA_MAJOR 3
-#define DATLAS_DA_DUMMY_LIBRARY 1
+#define MOBI_DA_MAJOR 2
+#define MOBI_DA_MINOR 1
+#define MOBI_DA_DUMMY_LIBRARY 1
 
 
 /**
@@ -133,7 +134,6 @@ typedef void (*da_errorfunc_t)(da_severity_t severity, da_status_t status, const
 
 /* Manifest constants. */
 enum {
-    DA_CACHE_MAX = 50000,
     /*
      * used as the initial guess for the compiled size of an atlas.
      * If atlas sizes grow more beyond this, it can be expanded to avoid multiple scans of the data.
@@ -142,8 +142,9 @@ enum {
 };
 
 struct da_config {
-    unsigned int cache_size;
-    unsigned int __reserved[15]; /* enough reserved keywords for future use */
+    unsigned int ua_props;
+    unsigned int lang_props;
+    unsigned int __reserved[14]; /* enough reserved keywords for future use */
 };
 
 /**
@@ -212,7 +213,7 @@ da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn,
  * da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
  * different calls to search.
  * Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
- * Are assigned an ID within the context that is not transferable through different search results
+ * Are assigned an ID within the context that is not transferrable through different search results
 * within the same atlas.
 * @param atlas Atlas instance
 * @param extra_props properties
@@ -450,22 +451,21 @@ const char *da_typename(da_type_t type);
 * @param atlas
 * @return version
 */
-char *da_getdataversion(const da_atlas_t *atlas);
+char *da_getdataversion(da_atlas_t *atlas);
 
 /**
 * @brief returns the date creation's timestamp from the JSON in memory
 * @param atlas
 * @return version
 */
-time_t da_getdatacreation(const da_atlas_t *atlas);
-char *da_getdatacreationiso8601(const da_atlas_t *atlas);
+time_t da_getdatacreation(da_atlas_t *atlas);
 
 /**
 * @brief returns the revision's number from the JSON in memory
 * @param atlas
 * @return version
 */
-int da_getdatarevision(const da_atlas_t *atlas);
+int da_getdatarevision(da_atlas_t *atlas);
 
 /**
 * @brief returns the name of a global property
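To tie the prototypes above to the call sites seen in the da.c hunk, here is a minimal sketch of the compile/open/close lifecycle. It is an illustration only, built solely from calls visible in this diff: it reuses the stdio read/seek callback style of dadwsch.c, and assumes da_atlas_compile() leaves the image unallocated on failure.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <dac.h>

    /* stdio-based read/seek callbacks, as in dadwsch.c */
    static size_t jsonread(void *ctx, size_t count, char *buf)
    {
        return fread(buf, 1, count, ctx);
    }

    static da_status_t jsonseek(void *ctx, off_t pos)
    {
        return fseek(ctx, pos, SEEK_SET) != -1 ? DA_OK : DA_SYS;
    }

    int main(int argc, char **argv)
    {
        da_property_decl_t extraprops[1] = {{ 0, 0 }};
        da_atlas_t atlas;
        void *img;
        size_t sz;
        FILE *jsonp;

        if (argc < 2 || (jsonp = fopen(argv[1], "r")) == NULL)
            return EXIT_FAILURE;

        da_init();
        /* compile the JSON data file into an in-memory image... */
        if (da_atlas_compile(jsonp, jsonread, jsonseek, &img, &sz) == DA_OK) {
            /* ...then open an atlas instance on that image */
            if (da_atlas_open(&atlas, extraprops, img, sz) == DA_OK) {
                printf("data version: %s\n", da_getdataversion(&atlas));
                da_atlas_close(&atlas);
            }
            free(img);
        }
        fclose(jsonp);
        da_fini();
        return 0;
    }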
@@ -1,26 +0,0 @@
-#include "dac.h"
-
-
-extern "C" {
-void da_atlas_cache_init(const da_atlas_t *atlas) {
-    (void)atlas;
-}
-
-da_status_t da_atlas_cache_insert(const da_atlas_t *atlas, unsigned long long h, da_deviceinfo_t *info) {
-    (void)atlas;
-    (void)h;
-    (void)info;
-    return DA_OK;
-}
-
-da_status_t da_atlas_cache_search(const da_atlas_t *atlas, unsigned long long h, da_deviceinfo_t **info) {
-    (void)atlas;
-    (void)h;
-    (void)info;
-    return DA_OK;
-}
-
-void da_atlas_cache_close(da_atlas_t *atlas) {
-    (void)atlas;
-}
-}
@@ -1 +0,0 @@
-#include <stdio.h>
@@ -1 +0,0 @@
-#include <stdio.h>
@@ -47,12 +47,6 @@ via the OpenTracing API with OpenTracing compatible servers (tracers).
 Currently, tracers that support this API include Datadog, Jaeger, LightStep
 and Zipkin.
 
-Note: The OpenTracing filter shouldn't be used for new designs as OpenTracing
-      itself is no longer maintained nor supported by its authors. A
-      replacement filter based on OpenTelemetry is currently under development
-      and is expected to be ready around HAProxy 3.2. As such OpenTracing will
-      be deprecated in 3.3 and removed in 3.5.
-
 The OT filter was primarily tested with the Jaeger tracer, while configurations
 for both Datadog and Zipkin tracers were also set in the test directory.

@@ -60,7 +60,7 @@
 #define FLT_OT_DBG_CONF_TRACER(f,a) \
     FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "'%s' %p '%s' %p %u %hhu %hhu 0x%02hhx %p:%s 0x%08x %s %s %s }", \
                FLT_OT_CONF_HDR_ARGS(a, id), (a)->config, (a)->cfgbuf, (a)->plugin, (a)->tracer, (a)->rate_limit, (a)->flag_harderr, \
-               (a)->flag_disabled, (a)->logging, &((a)->proxy_log), flt_ot_list_debug(&((a)->proxy_log.loggers)), (a)->analyzers, \
+               (a)->flag_disabled, (a)->logging, &((a)->proxy_log), flt_ot_list_debug(&((a)->proxy_log.logsrvs)), (a)->analyzers, \
                flt_ot_list_debug(&((a)->acls)), flt_ot_list_debug(&((a)->ph_groups)), flt_ot_list_debug(&((a)->ph_scopes)))
 
 #define FLT_OT_DBG_CONF(f,a) \
@@ -718,7 +718,7 @@ static void flt_ot_check_timeouts(struct stream *s, struct filter *f)
     if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
         FLT_OT_RETURN();
 
-    s->pending_events |= STRM_EVT_MSG;
+    s->pending_events |= TASK_WOKEN_MSG;
 
     flt_ot_return_void(f, &err);
 
@@ -1074,9 +1074,8 @@ static int flt_ot_post_parse_cfg_scope(void)
  */
 static int flt_ot_parse_cfg(struct flt_ot_conf *conf, const char *flt_name, char **err)
 {
     struct list backup_sections;
-    struct cfgfile cfg_file = {0};
     int retval = ERR_ABORT | ERR_ALERT;
 
     FLT_OT_FUNC("%p, \"%s\", %p:%p", conf, flt_name, FLT_OT_DPTR_ARGS(err));
 
@@ -1095,16 +1094,8 @@ static int flt_ot_parse_cfg(struct flt_ot_conf *conf, const char *flt_name, char
         /* Do nothing. */;
     else if (access(conf->cfg_file, R_OK) == -1)
         FLT_OT_PARSE_ERR(err, "'%s' : %s", conf->cfg_file, strerror(errno));
-    else {
-        cfg_file.filename = conf->cfg_file;
-        cfg_file.size = load_cfg_in_mem(cfg_file.filename, &cfg_file.content);
-        if (cfg_file.size < 0) {
-            ha_free(&cfg_file.content);
-            FLT_OT_RETURN_INT(retval);
-        }
-        retval = parse_cfg(&cfg_file);
-        ha_free(&cfg_file.content);
-    }
+    else
+        retval = readcfgfile(conf->cfg_file);
 
     /* Unregister OT sections and restore previous sections. */
     cfg_unregister_sections();

@@ -113,7 +113,7 @@ struct flt_ot_runtime_context *flt_ot_runtime_context_init(struct stream *s, struct filter *f)
     LIST_INIT(&(retptr->contexts));
 
     uuid = b_make(retptr->uuid, sizeof(retptr->uuid), 0, 0);
-    ha_generate_uuid_v4(&uuid);
+    ha_generate_uuid(&uuid);
 
 #ifdef USE_OT_VARS
     /*

@@ -39,21 +39,14 @@
  */
 static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope)
 {
-    int i;
+    const struct var *var;
 
     if (vars == NULL)
         return;
 
     vars_rdlock(vars);
-    for (i = 0; i < VAR_NAME_ROOTS; i++) {
-        struct ceb_node *node = cebu64_first(&(vars->name_root[i]));
-
-        for ( ; node != NULL; node = cebu64_next(&(vars->name_root[i]), node)) {
-            struct var *var = container_of(node, struct var, node);
-
-            FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
-        }
-    }
+    list_for_each_entry(var, &(vars->head), l)
+        FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
     vars_rdunlock(vars);
 }
 
@@ -75,52 +75,6 @@ exported. Here are examples:
   /metrics?scope=&scope=global   # ==> global metrics will be exported
   /metrics?scope=sticktable      # ==> stick tables metrics will be exported
 
-* Filtering on metrics name
-
-It is possible to filter metrics dumped by the exporter. To do so, multiple
-"metrics" parameters may be passed to specify all metrics to include or exclude,
-as a comma-separated list of filters. By default, there is no filter and all
-metrics are dumped. By specifying at least one metric to be included in the
-dump, this disables the default behavior and only explicitly mentioned metrics
-are dumped. To include a metric, its name must be specified. To exclude it, its
-name must be preceded by a minus character ('-'). Here are examples:
-
-  # Dump all metrics, except "haproxy_server_check_status"
-  /metrics?metrics=-haproxy_server_check_status
-
-  # Only dump frontends, backends and servers status
-  /metrics?metrics=haproxy_frontend_status,haproxy_backend_status,haproxy_server_status
-
-* Add section description as label for all metrics
-
-It is possible to set a description in global and proxy sections, via the
-"description" directive. The global description is exposed if it is defined, via
-the "haproxy_process_description" metric. But the descriptions provided in proxy
-sections are not dumped. However, it is possible to add it as a label for all
-metrics of the corresponding section, including the global one. To do so, the
-"desc-labels" parameter must be set:
-
-  /metrics?desc-labels
-
-  /metrics?scope=frontend&desc-labels
-
-* Dump extra counters
-
-Internally, some modules can register to frontends, backends, servers or
-listeners to export extra counters. For instance, some multiplexers do so on
-frontends or backends. To display extra counters for all registered modules,
-the "extra-counters" parameter must be passed. It can be cumulated with "scope"
-parameters:
-
-  /metrics?extra-counters                # ==> export all extra counters in
-                                         #     addition to main ones, for all
-                                         #     scopes
-  /metrics?scope=frontend&extra-counters # ==> export extra counters for
-                                         #     frontends
-
-These extra counters exist only for frontends, backends, servers and
-listeners.
-
 * How do I prevent my prometheus instance to explode?
 
 ** Filtering on servers state
@@ -155,8 +109,7 @@ except the server_check_status, you may configure prometheus that way:
 Exported metrics
 ------------------
 
-See prometheus export for the description of each field. Only main metrics are
-listed below. Metrics from extra counters are not listed.
+See prometheus export for the description of each field.
 
 * Globals metrics
 
@@ -205,8 +158,6 @@ listed below. Metrics from extra counters are not listed.
 | haproxy_process_current_tasks                      |
 | haproxy_process_current_run_queue                  |
 | haproxy_process_idle_time_percent                  |
-| haproxy_process_node                               |
-| haproxy_process_description                        |
 | haproxy_process_stopping                           |
 | haproxy_process_jobs                               |
 | haproxy_process_unstoppable_jobs                   |
@@ -359,8 +310,6 @@ listed below. Metrics from extra counters are not listed.
 | haproxy_server_redispatch_warnings_total           |
 | haproxy_server_status                              |
 | haproxy_server_weight                              |
-| haproxy_server_active                              |
-| haproxy_server_backup                              |
 | haproxy_server_check_failures_total                |
 | haproxy_server_check_up_down_total                 |
 | haproxy_server_check_last_change_seconds           |
@@ -389,9 +338,6 @@ listed below. Metrics from extra counters are not listed.
 | haproxy_server_max_connect_time_seconds            |
 | haproxy_server_max_response_time_seconds           |
 | haproxy_server_max_total_time_seconds              |
-| haproxy_server_agent_status                        |
-| haproxy_server_agent_code                          |
-| haproxy_server_agent_duration_seconds              |
 | haproxy_server_internal_errors_total               |
 | haproxy_server_unsafe_idle_connections_current     |
 | haproxy_server_safe_idle_connections_current       |
@@ -408,24 +354,3 @@ listed below. Metrics from extra counters are not listed.
 | haproxy_sticktable_size                            |
 | haproxy_sticktable_used                            |
 +----------------------------------------------------+
-
-* Resolvers metrics
-
-+----------------------------------------------------+
-| Metric name                                        |
-+----------------------------------------------------+
-| haproxy_resolver_sent                              |
-| haproxy_resolver_send_error                        |
-| haproxy_resolver_valid                             |
-| haproxy_resolver_update                            |
-| haproxy_resolver_cname                             |
-| haproxy_resolver_cname_error                       |
-| haproxy_resolver_any_err                           |
-| haproxy_resolver_nx                                |
-| haproxy_resolver_timeout                           |
-| haproxy_resolver_refused                           |
-| haproxy_resolver_other                             |
-| haproxy_resolver_invalid                           |
-| haproxy_resolver_too_big                           |
-| haproxy_resolver_outdated                          |
-+----------------------------------------------------+
@@ -1,128 +0,0 @@
-/*
- * include/promex/promex.h
- * This file contains definitions, macros and inline functions dedicated to
- * the prometheus exporter for HAProxy.
- *
- * Copyright 2024 Christopher Faulet <cfaulet@haproxy.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation, version 2.1
- * exclusively.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _PROMEX_PROMEX_H
-#define _PROMEX_PROMEX_H
-
-#include <import/ist.h>
-
-#include <haproxy/api-t.h>
-#include <haproxy/list-t.h>
-
-#include <haproxy/stats.h>
-
-/* Prometheus exporter flags (ctx->flags) */
-#define PROMEX_FL_METRIC_HDR            0x00000001
-#define PROMEX_FL_BODYLESS_RESP         0x00000002
-/* unused: 0x00000004 */
-/* unused: 0x00000008 */
-/* unused: 0x00000010 */
-/* unused: 0x00000020 */
-#define PROMEX_FL_MODULE_METRIC         0x00000040
-#define PROMEX_FL_SCOPE_GLOBAL          0x00000080
-#define PROMEX_FL_SCOPE_FRONT           0x00000100
-#define PROMEX_FL_SCOPE_BACK            0x00000200
-#define PROMEX_FL_SCOPE_SERVER          0x00000400
-#define PROMEX_FL_SCOPE_LI              0x00000800
-#define PROMEX_FL_SCOPE_MODULE          0x00001000
-#define PROMEX_FL_NO_MAINT_SRV          0x00002000
-#define PROMEX_FL_EXTRA_COUNTERS        0x00004000
-#define PROMEX_FL_INC_METRIC_BY_DEFAULT 0x00008000
-#define PROMEX_FL_DESC_LABELS           0x00010000
-
-#define PROMEX_FL_SCOPE_ALL (PROMEX_FL_SCOPE_GLOBAL | PROMEX_FL_SCOPE_FRONT | \
-                             PROMEX_FL_SCOPE_LI | PROMEX_FL_SCOPE_BACK | \
-                             PROMEX_FL_SCOPE_SERVER | PROMEX_FL_SCOPE_MODULE)
-
-/* The max number of labels per metric */
-#define PROMEX_MAX_LABELS 8
-
-/* Prometheus metric type (gauge or counter) */
-enum promex_mt_type {
-    PROMEX_MT_GAUGE   = 1,
-    PROMEX_MT_COUNTER = 2,
-};
-
-/* Describe a prometheus metric */
-struct promex_metric {
-    struct ist n;              /* The metric name */
-    enum promex_mt_type type;  /* The metric type (gauge or counter) */
-    unsigned int flags;        /* PROMEX_FL_* flags */
-};
-
-/* Describe a prometheus metric label. It is just a key/value pair */
-struct promex_label {
-    struct ist name;
-    struct ist value;
-};
-
-/* Entity used to expose custom metrics on HAProxy.
- *
- * * start_metric_dump(): It is an optional callback function. If defined, it
- *                        is responsible to initialize the dump context used
- *                        as the first restart point.
- *
- * * stop_metric_dump(): It is an optional callback function. If defined, it
- *                       is responsible to deinit the dump context.
- *
- * * metric_info(): This one is mandatory. It returns the info about the
- *                  metric: name, type and flags and description.
- *
- * * start_ts(): This one is mandatory, it initializes the context for a time
- *               series for a given metric. This context is the second
- *               restart point.
- *
- * * next_ts(): This one is mandatory. It iterates on time series for a
- *              given metric. It is also responsible to handle end of a
- *              time series and deinit the context.
- *
- * * fill_ts(): It fills info on the time series for a given metric : the
- *              labels and the value.
- */
-struct promex_module {
-    struct list list;
-    struct ist name;                                             /* The promex module name */
-    int (*metric_info)(unsigned int id,                          /* Return info for the given id */
-                       struct promex_metric *metric,
-                       struct ist *desc);
-    void *(*start_metrics_dump)();                               /* Start a dump (may be NULL) */
-    void (*stop_metrics_dump)(void *ctx);                        /* Stop a dump (may be NULL) */
-    void *(*start_ts)(void *ctx, unsigned int id);               /* Start a time series for the given metric */
-    void *(*next_ts)(void *ctx, void *ts_ctx, unsigned int id);  /* move to the next time series for the given metric */
-    int (*fill_ts)(void *ctx, void *ts_ctx, unsigned int id,     /* fill the time series for the given metric */
-                   struct promex_label *labels, struct field *field);
-
-    size_t nb_metrics;                                           /* # of metrics */
-};
-
-extern struct list promex_module_list;
-
-void promex_register_module(struct promex_module *m);
-
-#endif /* _PROMEX_PROMEX_H */
-
-/*
- * Local variables:
- *  c-indent-level: 8
- *  c-basic-offset: 8
- * End:
- */
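Since the header above (removed on the v2.9.0 side) is the whole contract for custom metric modules, a rough sketch of a module exporting a single gauge may help. This is an assumption-laden illustration, not code from the tree: the callback return conventions are inferred from the comments above, and mkf_u32()/INITCALL1() are the usual HAProxy stats/initcall helpers, assumed to be available.

    #include <import/ist.h>
    #include <haproxy/initcall.h>
    #include <haproxy/stats.h>
    #include <promex/promex.h>

    /* describe the single (hypothetical) metric this module exports */
    static int example_metric_info(unsigned int id, struct promex_metric *metric, struct ist *desc)
    {
        if (id != 0)
            return 0;
        metric->n     = ist("example_value");
        metric->type  = PROMEX_MT_GAUGE;
        metric->flags = PROMEX_FL_MODULE_METRIC;
        *desc = ist("an illustrative module metric");
        return 1;
    }

    /* one time series per metric: the dump context doubles as the ts context */
    static void *example_start_ts(void *ctx, unsigned int id)
    {
        return ctx;
    }

    static void *example_next_ts(void *ctx, void *ts_ctx, unsigned int id)
    {
        return NULL; /* no further time series after the first */
    }

    static int example_fill_ts(void *ctx, void *ts_ctx, unsigned int id,
                               struct promex_label *labels, struct field *field)
    {
        labels[0].name  = ist("source");
        labels[0].value = ist("example");
        *field = mkf_u32(FN_GAUGE, 42); /* assumed stats helper */
        return 1;
    }

    static struct promex_module example_module = {
        .name        = IST("example"),
        .metric_info = example_metric_info,
        .start_ts    = example_start_ts,
        .next_ts     = example_next_ts,
        .fill_ts     = example_fill_ts,
        .nb_metrics  = 1,
    };

    INITCALL1(STG_REGISTER, promex_register_module, &example_module);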
[One file's diff suppressed because it is too large]
admin/acme.sh/LICENSE (new file, 674 lines)
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
|
||||||
|
keep intact all notices stating that this License and any
|
||||||
|
non-permissive terms added in accord with section 7 apply to the code;
|
||||||
|
keep intact all notices of the absence of any warranty; and give all
|
||||||
|
recipients a copy of this License along with the Program.
|
||||||
|
|
||||||
|
You may charge any price or no price for each copy that you convey,
|
||||||
|
and you may offer support or warranty protection for a fee.
|
||||||
|
|
||||||
|
5. Conveying Modified Source Versions.
|
||||||
|
|
||||||
|
You may convey a work based on the Program, or the modifications to
|
||||||
|
produce it from the Program, in the form of source code under the
|
||||||
|
terms of section 4, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) The work must carry prominent notices stating that you modified
|
||||||
|
it, and giving a relevant date.
|
||||||
|
|
||||||
|
b) The work must carry prominent notices stating that it is
|
||||||
|
released under this License and any conditions added under section
|
||||||
|
7. This requirement modifies the requirement in section 4 to
|
||||||
|
"keep intact all notices".
|
||||||
|
|
||||||
|
c) You must license the entire work, as a whole, under this
|
||||||
|
License to anyone who comes into possession of a copy. This
|
||||||
|
License will therefore apply, along with any applicable section 7
|
||||||
|
additional terms, to the whole of the work, and all its parts,
|
||||||
|
regardless of how they are packaged. This License gives no
|
||||||
|
permission to license the work in any other way, but it does not
|
||||||
|
invalidate such permission if you have separately received it.
|
||||||
|
|
||||||
|
d) If the work has interactive user interfaces, each must display
|
||||||
|
Appropriate Legal Notices; however, if the Program has interactive
|
||||||
|
interfaces that do not display Appropriate Legal Notices, your
|
||||||
|
work need not make them do so.
|
||||||
|
|
||||||
|
A compilation of a covered work with other separate and independent
|
||||||
|
works, which are not by their nature extensions of the covered work,
|
||||||
|
and which are not combined with it such as to form a larger program,
|
||||||
|
in or on a volume of a storage or distribution medium, is called an
|
||||||
|
"aggregate" if the compilation and its resulting copyright are not
|
||||||
|
used to limit the access or legal rights of the compilation's users
|
||||||
|
beyond what the individual works permit. Inclusion of a covered work
|
||||||
|
in an aggregate does not cause this License to apply to the other
|
||||||
|
parts of the aggregate.
|
||||||
|
|
||||||
|
6. Conveying Non-Source Forms.
|
||||||
|
|
||||||
|
You may convey a covered work in object code form under the terms
|
||||||
|
of sections 4 and 5, provided that you also convey the
|
||||||
|
machine-readable Corresponding Source under the terms of this License,
|
||||||
|
in one of these ways:
|
||||||
|
|
||||||
|
a) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by the
|
||||||
|
Corresponding Source fixed on a durable physical medium
|
||||||
|
customarily used for software interchange.
|
||||||
|
|
||||||
|
b) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by a
|
||||||
|
written offer, valid for at least three years and valid for as
|
||||||
|
long as you offer spare parts or customer support for that product
|
||||||
|
model, to give anyone who possesses the object code either (1) a
|
||||||
|
copy of the Corresponding Source for all the software in the
|
||||||
|
product that is covered by this License, on a durable physical
|
||||||
|
medium customarily used for software interchange, for a price no
|
||||||
|
more than your reasonable cost of physically performing this
|
||||||
|
conveying of source, or (2) access to copy the
|
||||||
|
Corresponding Source from a network server at no charge.
|
||||||
|
|
||||||
|
c) Convey individual copies of the object code with a copy of the
|
||||||
|
written offer to provide the Corresponding Source. This
|
||||||
|
alternative is allowed only occasionally and noncommercially, and
|
||||||
|
only if you received the object code with such an offer, in accord
|
||||||
|
with subsection 6b.
|
||||||
|
|
||||||
|
d) Convey the object code by offering access from a designated
|
||||||
|
place (gratis or for a charge), and offer equivalent access to the
|
||||||
|
Corresponding Source in the same way through the same place at no
|
||||||
|
further charge. You need not require recipients to copy the
|
||||||
|
Corresponding Source along with the object code. If the place to
|
||||||
|
copy the object code is a network server, the Corresponding Source
|
||||||
|
may be on a different server (operated by you or a third party)
|
||||||
|
that supports equivalent copying facilities, provided you maintain
|
||||||
|
clear directions next to the object code saying where to find the
|
||||||
|
Corresponding Source. Regardless of what server hosts the
|
||||||
|
Corresponding Source, you remain obligated to ensure that it is
|
||||||
|
available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided
|
||||||
|
you inform other peers where the object code and Corresponding
|
||||||
|
Source of the work are being offered to the general public at no
|
||||||
|
charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded
|
||||||
|
from the Corresponding Source as a System Library, need not be
|
||||||
|
included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any
|
||||||
|
tangible personal property which is normally used for personal, family,
|
||||||
|
or household purposes, or (2) anything designed or sold for incorporation
|
||||||
|
into a dwelling. In determining whether a product is a consumer product,
|
||||||
|
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||||
|
product received by a particular user, "normally used" refers to a
|
||||||
|
typical or common use of that class of product, regardless of the status
|
||||||
|
of the particular user or of the way in which the particular user
|
||||||
|
actually uses, or expects or is expected to use, the product. A product
|
||||||
|
is a consumer product regardless of whether the product has substantial
|
||||||
|
commercial, industrial or non-consumer uses, unless such uses represent
|
||||||
|
the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods,
|
||||||
|
procedures, authorization keys, or other information required to install
|
||||||
|
and execute modified versions of a covered work in that User Product from
|
||||||
|
a modified version of its Corresponding Source. The information must
|
||||||
|
suffice to ensure that the continued functioning of the modified object
|
||||||
|
code is in no case prevented or interfered with solely because
|
||||||
|
modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or
|
||||||
|
specifically for use in, a User Product, and the conveying occurs as
|
||||||
|
part of a transaction in which the right of possession and use of the
|
||||||
|
User Product is transferred to the recipient in perpetuity or for a
|
||||||
|
fixed term (regardless of how the transaction is characterized), the
|
||||||
|
Corresponding Source conveyed under this section must be accompanied
|
||||||
|
by the Installation Information. But this requirement does not apply
|
||||||
|
if neither you nor any third party retains the ability to install
|
||||||
|
modified object code on the User Product (for example, the work has
|
||||||
|
been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a
|
||||||
|
requirement to continue to provide support service, warranty, or updates
|
||||||
|
for a work that has been modified or installed by the recipient, or for
|
||||||
|
the User Product in which it has been modified or installed. Access to a
|
||||||
|
network may be denied when the modification itself materially and
|
||||||
|
adversely affects the operation of the network or violates the rules and
|
||||||
|
protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided,
|
||||||
|
in accord with this section must be in a format that is publicly
|
||||||
|
documented (and with an implementation available to the public in
|
||||||
|
source code form), and must require no special password or key for
|
||||||
|
unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.
|
||||||
|
|
||||||
|
"Additional permissions" are terms that supplement the terms of this
|
||||||
|
License by making exceptions from one or more of its conditions.
|
||||||
|
Additional permissions that are applicable to the entire Program shall
|
||||||
|
be treated as though they were included in this License, to the extent
|
||||||
|
that they are valid under applicable law. If additional permissions
|
||||||
|
apply only to part of the Program, that part may be used separately
|
||||||
|
under those permissions, but the entire Program remains governed by
|
||||||
|
this License without regard to the additional permissions.
|
||||||
|
|
||||||
|
When you convey a copy of a covered work, you may at your option
|
||||||
|
remove any additional permissions from that copy, or from any part of
|
||||||
|
it. (Additional permissions may be written to require their own
|
||||||
|
removal in certain cases when you modify the work.) You may place
|
||||||
|
additional permissions on material, added by you to a covered work,
|
||||||
|
for which you have or can give appropriate copyright permission.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, for material you
|
||||||
|
add to a covered work, you may (if authorized by the copyright holders of
|
||||||
|
that material) supplement the terms of this License with terms:
|
||||||
|
|
||||||
|
a) Disclaiming warranty or limiting liability differently from the
|
||||||
|
terms of sections 15 and 16 of this License; or
|
||||||
|
|
||||||
|
b) Requiring preservation of specified reasonable legal notices or
|
||||||
|
author attributions in that material or in the Appropriate Legal
|
||||||
|
Notices displayed by works containing it; or
|
||||||
|
|
||||||
|
c) Prohibiting misrepresentation of the origin of that material, or
|
||||||
|
requiring that modified versions of such material be marked in
|
||||||
|
reasonable ways as different from the original version; or
|
||||||
|
|
||||||
|
d) Limiting the use for publicity purposes of names of licensors or
|
||||||
|
authors of the material; or
|
||||||
|
|
||||||
|
e) Declining to grant rights under trademark law for use of some
|
||||||
|
trade names, trademarks, or service marks; or
|
||||||
|
|
||||||
|
f) Requiring indemnification of licensors and authors of that
|
||||||
|
material by anyone who conveys the material (or modified versions of
|
||||||
|
it) with contractual assumptions of liability to the recipient, for
|
||||||
|
any liability that these contractual assumptions directly impose on
|
||||||
|
those licensors and authors.
|
||||||
|
|
||||||
|
All other non-permissive additional terms are considered "further
|
||||||
|
restrictions" within the meaning of section 10. If the Program as you
|
||||||
|
received it, or any part of it, contains a notice stating that it is
|
||||||
|
governed by this License along with a term that is a further
|
||||||
|
restriction, you may remove that term. If a license document contains
|
||||||
|
a further restriction but permits relicensing or conveying under this
|
||||||
|
License, you may add to a covered work material governed by the terms
|
||||||
|
of that license document, provided that the further restriction does
|
||||||
|
not survive such relicensing or conveying.
|
||||||
|
|
||||||
|
If you add terms to a covered work in accord with this section, you
|
||||||
|
must place, in the relevant source files, a statement of the
|
||||||
|
additional terms that apply to those files, or a notice indicating
|
||||||
|
where to find the applicable terms.
|
||||||
|
|
||||||
|
Additional terms, permissive or non-permissive, may be stated in the
|
||||||
|
form of a separately written license, or stated as exceptions;
|
||||||
|
the above requirements apply either way.
|
||||||
|
|
||||||
|
8. Termination.
|
||||||
|
|
||||||
|
You may not propagate or modify a covered work except as expressly
|
||||||
|
provided under this License. Any attempt otherwise to propagate or
|
||||||
|
modify it is void, and will automatically terminate your rights under
|
||||||
|
this License (including any patent licenses granted under the third
|
||||||
|
paragraph of section 11).
|
||||||
|
|
||||||
|
However, if you cease all violation of this License, then your
|
||||||
|
license from a particular copyright holder is reinstated (a)
|
||||||
|
provisionally, unless and until the copyright holder explicitly and
|
||||||
|
finally terminates your license, and (b) permanently, if the copyright
|
||||||
|
holder fails to notify you of the violation by some reasonable means
|
||||||
|
prior to 60 days after the cessation.
|
||||||
|
|
||||||
|
Moreover, your license from a particular copyright holder is
|
||||||
|
reinstated permanently if the copyright holder notifies you of the
|
||||||
|
violation by some reasonable means, this is the first time you have
|
||||||
|
received notice of violation of this License (for any work) from that
|
||||||
|
copyright holder, and you cure the violation prior to 30 days after
|
||||||
|
your receipt of the notice.
|
||||||
|
|
||||||
|
Termination of your rights under this section does not terminate the
|
||||||
|
licenses of parties who have received copies or rights from you under
|
||||||
|
this License. If your rights have been terminated and not permanently
|
||||||
|
reinstated, you do not qualify to receive new licenses for the same
|
||||||
|
material under section 10.
|
||||||
|
|
||||||
|
9. Acceptance Not Required for Having Copies.
|
||||||
|
|
||||||
|
You are not required to accept this License in order to receive or
|
||||||
|
run a copy of the Program. Ancillary propagation of a covered work
|
||||||
|
occurring solely as a consequence of using peer-to-peer transmission
|
||||||
|
to receive a copy likewise does not require acceptance. However,
|
||||||
|
nothing other than this License grants you permission to propagate or
|
||||||
|
modify any covered work. These actions infringe copyright if you do
|
||||||
|
not accept this License. Therefore, by modifying or propagating a
|
||||||
|
covered work, you indicate your acceptance of this License to do so.
|
||||||
|
|
||||||
|
10. Automatic Licensing of Downstream Recipients.
|
||||||
|
|
||||||
|
Each time you convey a covered work, the recipient automatically
|
||||||
|
receives a license from the original licensors, to run, modify and
|
||||||
|
propagate that work, subject to this License. You are not responsible
|
||||||
|
for enforcing compliance by third parties with this License.
|
||||||
|
|
||||||
|
An "entity transaction" is a transaction transferring control of an
|
||||||
|
organization, or substantially all assets of one, or subdividing an
|
||||||
|
organization, or merging organizations. If propagation of a covered
|
||||||
|
work results from an entity transaction, each party to that
|
||||||
|
transaction who receives a copy of the work also receives whatever
|
||||||
|
licenses to the work the party's predecessor in interest had or could
|
||||||
|
give under the previous paragraph, plus a right to possession of the
|
||||||
|
Corresponding Source of the work from the predecessor in interest, if
|
||||||
|
the predecessor has it or can get it with reasonable efforts.
|
||||||
|
|
||||||
|
You may not impose any further restrictions on the exercise of the
|
||||||
|
rights granted or affirmed under this License. For example, you may
|
||||||
|
not impose a license fee, royalty, or other charge for exercise of
|
||||||
|
rights granted under this License, and you may not initiate litigation
|
||||||
|
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||||
|
any patent claim is infringed by making, using, selling, offering for
|
||||||
|
sale, or importing the Program or any portion of it.
|
||||||
|
|
||||||
|
11. Patents.
|
||||||
|
|
||||||
|
A "contributor" is a copyright holder who authorizes use under this
|
||||||
|
License of the Program or a work on which the Program is based. The
|
||||||
|
work thus licensed is called the contributor's "contributor version".
|
||||||
|
|
||||||
|
A contributor's "essential patent claims" are all patent claims
|
||||||
|
owned or controlled by the contributor, whether already acquired or
|
||||||
|
hereafter acquired, that would be infringed by some manner, permitted
|
||||||
|
by this License, of making, using, or selling its contributor version,
|
||||||
|
but do not include claims that would be infringed only as a
|
||||||
|
consequence of further modification of the contributor version. For
|
||||||
|
purposes of this definition, "control" includes the right to grant
|
||||||
|
patent sublicenses in a manner consistent with the requirements of
|
||||||
|
this License.
|
||||||
|
|
||||||
|
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||||
|
patent license under the contributor's essential patent claims, to
|
||||||
|
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||||
|
propagate the contents of its contributor version.
|
||||||
|
|
||||||
|
In the following three paragraphs, a "patent license" is any express
|
||||||
|
agreement or commitment, however denominated, not to enforce a patent
|
||||||
|
(such as an express permission to practice a patent or covenant not to
|
||||||
|
sue for patent infringement). To "grant" such a patent license to a
|
||||||
|
party means to make such an agreement or commitment not to enforce a
|
||||||
|
patent against the party.
|
||||||
|
|
||||||
|
If you convey a covered work, knowingly relying on a patent license,
|
||||||
|
and the Corresponding Source of the work is not available for anyone
|
||||||
|
to copy, free of charge and under the terms of this License, through a
|
||||||
|
publicly available network server or other readily accessible means,
|
||||||
|
then you must either (1) cause the Corresponding Source to be so
|
||||||
|
available, or (2) arrange to deprive yourself of the benefit of the
|
||||||
|
patent license for this particular work, or (3) arrange, in a manner
|
||||||
|
consistent with the requirements of this License, to extend the patent
|
||||||
|
license to downstream recipients. "Knowingly relying" means you have
|
||||||
|
actual knowledge that, but for the patent license, your conveying the
|
||||||
|
covered work in a country, or your recipient's use of the covered work
|
||||||
|
in a country, would infringe one or more identifiable patents in that
|
||||||
|
country that you have reason to believe are valid.
|
||||||
|
|
||||||
|
If, pursuant to or in connection with a single transaction or
|
||||||
|
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||||
|
covered work, and grant a patent license to some of the parties
|
||||||
|
receiving the covered work authorizing them to use, propagate, modify
|
||||||
|
or convey a specific copy of the covered work, then the patent license
|
||||||
|
you grant is automatically extended to all recipients of the covered
|
||||||
|
work and works based on it.
|
||||||
|
|
||||||
|
A patent license is "discriminatory" if it does not include within
|
||||||
|
the scope of its coverage, prohibits the exercise of, or is
|
||||||
|
conditioned on the non-exercise of one or more of the rights that are
|
||||||
|
specifically granted under this License. You may not convey a covered
|
||||||
|
work if you are a party to an arrangement with a third party that is
|
||||||
|
in the business of distributing software, under which you make payment
|
||||||
|
to the third party based on the extent of your activity of conveying
|
||||||
|
the work, and under which the third party grants, to any of the
|
||||||
|
parties who would receive the covered work from you, a discriminatory
|
||||||
|
patent license (a) in connection with copies of the covered work
|
||||||
|
conveyed by you (or copies made from those copies), or (b) primarily
|
||||||
|
for and in connection with specific products or compilations that
|
||||||
|
contain the covered work, unless you entered into that arrangement,
|
||||||
|
or that patent license was granted, prior to 28 March 2007.
|
||||||
|
|
||||||
|
Nothing in this License shall be construed as excluding or limiting
|
||||||
|
any implied license or other defenses to infringement that may
|
||||||
|
otherwise be available to you under applicable patent law.
|
||||||
|
|
||||||
|
12. No Surrender of Others' Freedom.
|
||||||
|
|
||||||
|
If conditions are imposed on you (whether by court order, agreement or
|
||||||
|
otherwise) that contradict the conditions of this License, they do not
|
||||||
|
excuse you from the conditions of this License. If you cannot convey a
|
||||||
|
covered work so as to satisfy simultaneously your obligations under this
|
||||||
|
License and any other pertinent obligations, then as a consequence you may
|
||||||
|
not convey it at all. For example, if you agree to terms that obligate you
|
||||||
|
to collect a royalty for further conveying from those to whom you convey
|
||||||
|
the Program, the only way you could satisfy both those terms and this
|
||||||
|
License would be to refrain entirely from conveying the Program.
|
||||||
|
|
||||||
|
13. Use with the GNU Affero General Public License.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, you have
|
||||||
|
permission to link or combine any covered work with a work licensed
|
||||||
|
under version 3 of the GNU Affero General Public License into a single
|
||||||
|
combined work, and to convey the resulting work. The terms of this
|
||||||
|
License will continue to apply to the part which is the covered work,
|
||||||
|
but the special requirements of the GNU Affero General Public License,
|
||||||
|
section 13, concerning interaction through a network will apply to the
|
||||||
|
combination as such.
|
||||||
|
|
||||||
|
14. Revised Versions of this License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions of
|
||||||
|
the GNU General Public License from time to time. Such new versions will
|
||||||
|
be similar in spirit to the present version, but may differ in detail to
|
||||||
|
address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the
|
||||||
|
Program specifies that a certain numbered version of the GNU General
|
||||||
|
Public License "or any later version" applies to it, you have the
|
||||||
|
option of following the terms and conditions either of that numbered
|
||||||
|
version or of any later version published by the Free Software
|
||||||
|
Foundation. If the Program does not specify a version number of the
|
||||||
|
GNU General Public License, you may choose any version ever published
|
||||||
|
by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Program specifies that a proxy can decide which future
|
||||||
|
versions of the GNU General Public License can be used, that proxy's
|
||||||
|
public statement of acceptance of a version permanently authorizes you
|
||||||
|
to choose that version for the Program.
|
||||||
|
|
||||||
|
Later license versions may give you additional or different
|
||||||
|
permissions. However, no additional obligations are imposed on any
|
||||||
|
author or copyright holder as a result of your choosing to follow a
|
||||||
|
later version.
|
||||||
|
|
||||||
|
15. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||||
|
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||||
|
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||||
|
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||||
|
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||||
|
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
16. Limitation of Liability.
|
||||||
|
|
||||||
|
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||||
|
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||||
|
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||||
|
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||||
|
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||||
|
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||||
|
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||||
|
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGES.
|
||||||
|
|
||||||
|
17. Interpretation of Sections 15 and 16.
|
||||||
|
|
||||||
|
If the disclaimer of warranty and limitation of liability provided
|
||||||
|
above cannot be given local legal effect according to their terms,
|
||||||
|
reviewing courts shall apply local law that most closely approximates
|
||||||
|
an absolute waiver of all civil liability in connection with the
|
||||||
|
Program, unless a warranty or assumption of liability accompanies a
|
||||||
|
copy of the Program in return for a fee.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Programs
|
||||||
|
|
||||||
|
If you develop a new program, and you want it to be of the greatest
|
||||||
|
possible use to the public, the best way to achieve this is to make it
|
||||||
|
free software which everyone can redistribute and change under these terms.
|
||||||
|
|
||||||
|
To do so, attach the following notices to the program. It is safest
|
||||||
|
to attach them to the start of each source file to most effectively
|
||||||
|
state the exclusion of warranty; and each file should have at least
|
||||||
|
the "copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
<one line to give the program's name and a brief idea of what it does.>
|
||||||
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation, either version 3 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
If the program does terminal interaction, make it output a short
|
||||||
|
notice like this when it starts in an interactive mode:
|
||||||
|
|
||||||
|
<program> Copyright (C) <year> <name of author>
|
||||||
|
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||||
|
This is free software, and you are welcome to redistribute it
|
||||||
|
under certain conditions; type `show c' for details.
|
||||||
|
|
||||||
|
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||||
|
parts of the General Public License. Of course, your program's commands
|
||||||
|
might be different; for a GUI interface, you would use an "about box".
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or school,
|
||||||
|
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||||
|
For more information on this, and how to apply and follow the GNU GPL, see
|
||||||
|
<https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
The GNU General Public License does not permit incorporating your program
|
||||||
|
into proprietary programs. If your program is a subroutine library, you
|
||||||
|
may consider it more useful to permit linking proprietary applications with
|
||||||
|
the library. If this is what you want to do, use the GNU Lesser General
|
||||||
|
Public License instead of this License. But first, please read
|
||||||
|
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||||
13
admin/acme.sh/README
Normal file
@ -0,0 +1,13 @@
This directory contains a fork of the acme.sh deploy script for haproxy which
allows acme.sh to run as non-root and does not require reloading haproxy.

The content of this directory is licensed under GPLv3 as explained in the
LICENSE file.

This was originally written for this pull request
https://github.com/acmesh-official/acme.sh/pull/4581.

The documentation is available on the haproxy wiki:
https://github.com/haproxy/wiki/wiki/Letsencrypt-integration-with-HAProxy-and-acme.sh

The haproxy.sh script must replace the one provided by acme.sh.
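
As an illustration only (the domain below is a placeholder, not part of the
original README), a certificate already issued by acme.sh would then be
deployed through acme.sh's standard deploy-hook mechanism:

    acme.sh --deploy -d example.com --deploy-hook haproxy

The environment variables honoured by the script are documented in the
header of haproxy.sh itself.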
403
admin/acme.sh/haproxy.sh
Normal file
@ -0,0 +1,403 @@
#!/usr/bin/env sh

# Script for acme.sh to deploy certificates to haproxy
#
# The following variables can be exported:
#
# export DEPLOY_HAPROXY_PEM_NAME="${domain}.pem"
#
# Defines the name of the PEM file.
# Defaults to "<domain>.pem"
#
# export DEPLOY_HAPROXY_PEM_PATH="/etc/haproxy"
#
# Defines location of PEM file for HAProxy.
# Defaults to /etc/haproxy
#
# export DEPLOY_HAPROXY_RELOAD="systemctl reload haproxy"
#
# OPTIONAL: Reload command used post deploy
# This defaults to being a no-op (ie "true").
# It is strongly recommended to set this to something that makes sense
# for your distro.
#
# export DEPLOY_HAPROXY_ISSUER="no"
#
# OPTIONAL: Places CA file as "${DEPLOY_HAPROXY_PEM}.issuer"
# Note: Required for OCSP stapling to work
#
# export DEPLOY_HAPROXY_BUNDLE="no"
#
# OPTIONAL: Deploy this certificate as part of a multi-cert bundle
# This adds a suffix to the certificate based on the certificate type,
# eg RSA certificates will have .rsa as a suffix to the file name.
# HAProxy will load all certificates and provide one or the other
# depending on client capabilities.
# Note: This functionality requires that HAProxy was compiled against
# a version of OpenSSL that supports this.
#
# export DEPLOY_HAPROXY_HOT_UPDATE="yes"
# export DEPLOY_HAPROXY_STATS_SOCKET="UNIX:/run/haproxy/admin.sock"
#
# OPTIONAL: Deploy the certificate over the HAProxy stats socket without
# needing to reload HAProxy. Default is "no".
#
# Requires the socat binary. The DEPLOY_HAPROXY_STATS_SOCKET variable uses
# the socat address format.
#
# export DEPLOY_HAPROXY_MASTER_CLI="UNIX:/run/haproxy-master.sock"
#
# OPTIONAL: To use the master CLI with DEPLOY_HAPROXY_HOT_UPDATE="yes" instead
# of a stats socket, use this variable.
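#
# A minimal illustrative setup (the values below are examples, not script
# defaults beyond those documented above) for a reload-free hot update:
#
#   export DEPLOY_HAPROXY_PEM_PATH="/etc/haproxy"
#   export DEPLOY_HAPROXY_HOT_UPDATE="yes"
#   export DEPLOY_HAPROXY_STATS_SOCKET="UNIX:/run/haproxy/admin.sock"
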
######## Public functions #####################

#domain keyfile certfile cafile fullchain
haproxy_deploy() {
  _cdomain="$1"
  _ckey="$2"
  _ccert="$3"
  _cca="$4"
  _cfullchain="$5"
  _cmdpfx=""

  # Some defaults
  DEPLOY_HAPROXY_PEM_PATH_DEFAULT="/etc/haproxy"
  DEPLOY_HAPROXY_PEM_NAME_DEFAULT="${_cdomain}.pem"
  DEPLOY_HAPROXY_BUNDLE_DEFAULT="no"
  DEPLOY_HAPROXY_ISSUER_DEFAULT="no"
  DEPLOY_HAPROXY_RELOAD_DEFAULT="true"
  DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT="no"
  DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT="UNIX:/run/haproxy/admin.sock"

  _debug _cdomain "${_cdomain}"
  _debug _ckey "${_ckey}"
  _debug _ccert "${_ccert}"
  _debug _cca "${_cca}"
  _debug _cfullchain "${_cfullchain}"

  # PEM_PATH is optional. If not provided then assume "${DEPLOY_HAPROXY_PEM_PATH_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_PEM_PATH
  _debug2 DEPLOY_HAPROXY_PEM_PATH "${DEPLOY_HAPROXY_PEM_PATH}"
  if [ -n "${DEPLOY_HAPROXY_PEM_PATH}" ]; then
    Le_Deploy_haproxy_pem_path="${DEPLOY_HAPROXY_PEM_PATH}"
    _savedomainconf Le_Deploy_haproxy_pem_path "${Le_Deploy_haproxy_pem_path}"
  elif [ -z "${Le_Deploy_haproxy_pem_path}" ]; then
    Le_Deploy_haproxy_pem_path="${DEPLOY_HAPROXY_PEM_PATH_DEFAULT}"
  fi

  # Ensure PEM_PATH exists
  if [ -d "${Le_Deploy_haproxy_pem_path}" ]; then
    _debug "PEM_PATH ${Le_Deploy_haproxy_pem_path} exists"
  else
    _err "PEM_PATH ${Le_Deploy_haproxy_pem_path} does not exist"
    return 1
  fi

  # PEM_NAME is optional. If not provided then assume "${DEPLOY_HAPROXY_PEM_NAME_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_PEM_NAME
  _debug2 DEPLOY_HAPROXY_PEM_NAME "${DEPLOY_HAPROXY_PEM_NAME}"
  if [ -n "${DEPLOY_HAPROXY_PEM_NAME}" ]; then
    Le_Deploy_haproxy_pem_name="${DEPLOY_HAPROXY_PEM_NAME}"
    _savedomainconf Le_Deploy_haproxy_pem_name "${Le_Deploy_haproxy_pem_name}"
  elif [ -z "${Le_Deploy_haproxy_pem_name}" ]; then
    Le_Deploy_haproxy_pem_name="${DEPLOY_HAPROXY_PEM_NAME_DEFAULT}"
    # We better not have '*' as the first character
    if [ "${Le_Deploy_haproxy_pem_name%%"${Le_Deploy_haproxy_pem_name#?}"}" = '*' ]; then
      # removes the first character and adds a _ instead
      Le_Deploy_haproxy_pem_name="_${Le_Deploy_haproxy_pem_name#?}"
    fi
  fi

  # BUNDLE is optional. If not provided then assume "${DEPLOY_HAPROXY_BUNDLE_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_BUNDLE
  _debug2 DEPLOY_HAPROXY_BUNDLE "${DEPLOY_HAPROXY_BUNDLE}"
  if [ -n "${DEPLOY_HAPROXY_BUNDLE}" ]; then
    Le_Deploy_haproxy_bundle="${DEPLOY_HAPROXY_BUNDLE}"
    _savedomainconf Le_Deploy_haproxy_bundle "${Le_Deploy_haproxy_bundle}"
  elif [ -z "${Le_Deploy_haproxy_bundle}" ]; then
    Le_Deploy_haproxy_bundle="${DEPLOY_HAPROXY_BUNDLE_DEFAULT}"
  fi

  # ISSUER is optional. If not provided then assume "${DEPLOY_HAPROXY_ISSUER_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_ISSUER
  _debug2 DEPLOY_HAPROXY_ISSUER "${DEPLOY_HAPROXY_ISSUER}"
  if [ -n "${DEPLOY_HAPROXY_ISSUER}" ]; then
    Le_Deploy_haproxy_issuer="${DEPLOY_HAPROXY_ISSUER}"
    _savedomainconf Le_Deploy_haproxy_issuer "${Le_Deploy_haproxy_issuer}"
  elif [ -z "${Le_Deploy_haproxy_issuer}" ]; then
    Le_Deploy_haproxy_issuer="${DEPLOY_HAPROXY_ISSUER_DEFAULT}"
  fi

  # RELOAD is optional. If not provided then assume "${DEPLOY_HAPROXY_RELOAD_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_RELOAD
  _debug2 DEPLOY_HAPROXY_RELOAD "${DEPLOY_HAPROXY_RELOAD}"
  if [ -n "${DEPLOY_HAPROXY_RELOAD}" ]; then
    Le_Deploy_haproxy_reload="${DEPLOY_HAPROXY_RELOAD}"
    _savedomainconf Le_Deploy_haproxy_reload "${Le_Deploy_haproxy_reload}"
  elif [ -z "${Le_Deploy_haproxy_reload}" ]; then
    Le_Deploy_haproxy_reload="${DEPLOY_HAPROXY_RELOAD_DEFAULT}"
  fi

  # HOT_UPDATE is optional. If not provided then assume "${DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_HOT_UPDATE
  _debug2 DEPLOY_HAPROXY_HOT_UPDATE "${DEPLOY_HAPROXY_HOT_UPDATE}"
  if [ -n "${DEPLOY_HAPROXY_HOT_UPDATE}" ]; then
    Le_Deploy_haproxy_hot_update="${DEPLOY_HAPROXY_HOT_UPDATE}"
    _savedomainconf Le_Deploy_haproxy_hot_update "${Le_Deploy_haproxy_hot_update}"
  elif [ -z "${Le_Deploy_haproxy_hot_update}" ]; then
    Le_Deploy_haproxy_hot_update="${DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT}"
  fi

  # STATS_SOCKET is optional. If not provided then assume "${DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT}"
  _getdeployconf DEPLOY_HAPROXY_STATS_SOCKET
  _debug2 DEPLOY_HAPROXY_STATS_SOCKET "${DEPLOY_HAPROXY_STATS_SOCKET}"
  if [ -n "${DEPLOY_HAPROXY_STATS_SOCKET}" ]; then
    Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_STATS_SOCKET}"
    _savedomainconf Le_Deploy_haproxy_stats_socket "${Le_Deploy_haproxy_stats_socket}"
  elif [ -z "${Le_Deploy_haproxy_stats_socket}" ]; then
    Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT}"
  fi

  # MASTER_CLI is optional. No defaults are used. When the master CLI is used,
  # all commands are sent with a prefix.
  _getdeployconf DEPLOY_HAPROXY_MASTER_CLI
  _debug2 DEPLOY_HAPROXY_MASTER_CLI "${DEPLOY_HAPROXY_MASTER_CLI}"
  if [ -n "${DEPLOY_HAPROXY_MASTER_CLI}" ]; then
    Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_MASTER_CLI}"
    _savedomainconf Le_Deploy_haproxy_stats_socket "${Le_Deploy_haproxy_stats_socket}"
    _cmdpfx="@1 " # command prefix used for master CLI only.
  fi

  # Set the suffix depending on whether we are creating a bundle or not
  if [ "${Le_Deploy_haproxy_bundle}" = "yes" ]; then
    _info "Bundle creation requested"
    # Initialise $Le_Keylength if it's not already set
    if [ -z "${Le_Keylength}" ]; then
      Le_Keylength=""
    fi
    if _isEccKey "${Le_Keylength}"; then
      _info "ECC key type detected"
      _suffix=".ecdsa"
    else
      _info "RSA key type detected"
      _suffix=".rsa"
    fi
  else
    _suffix=""
  fi
  _debug _suffix "${_suffix}"

  # Set variables for later
  _pem="${Le_Deploy_haproxy_pem_path}/${Le_Deploy_haproxy_pem_name}${_suffix}"
  _issuer="${_pem}.issuer"
  _ocsp="${_pem}.ocsp"
  _reload="${Le_Deploy_haproxy_reload}"
  _statssock="${Le_Deploy_haproxy_stats_socket}"

  _info "Deploying PEM file"
  # Create a temporary PEM file
  _temppem="$(_mktemp)"
  _debug _temppem "${_temppem}"
  cat "${_ccert}" "${_cca}" "${_ckey}" | grep . >"${_temppem}"
  _ret="$?"

  # Check that we could create the temporary file
  if [ "${_ret}" != "0" ]; then
    _err "Error code ${_ret} returned during PEM file creation"
    [ -f "${_temppem}" ] && rm -f "${_temppem}"
    return ${_ret}
  fi

  # Move PEM file into place
  _info "Moving new certificate into place"
  _debug _pem "${_pem}"
  cat "${_temppem}" >"${_pem}"
  _ret=$?

  # Clean up temp file
  [ -f "${_temppem}" ] && rm -f "${_temppem}"

  # Deal with any failure of moving PEM file into place
  if [ "${_ret}" != "0" ]; then
    _err "Error code ${_ret} returned while moving new certificate into place"
    return ${_ret}
  fi

  # Update .issuer file if requested
  if [ "${Le_Deploy_haproxy_issuer}" = "yes" ]; then
    _info "Updating .issuer file"
    _debug _issuer "${_issuer}"
    cat "${_cca}" >"${_issuer}"
    _ret="$?"

    if [ "${_ret}" != "0" ]; then
      _err "Error code ${_ret} returned while copying issuer/CA certificate into place"
      return ${_ret}
    fi
  else
    [ -f "${_issuer}" ] && _err "Issuer file update not requested but .issuer file exists"
  fi

  # Update .ocsp file if certificate was requested with --ocsp/--ocsp-must-staple option
  if [ -z "${Le_OCSP_Staple}" ]; then
    Le_OCSP_Staple="0"
  fi
  if [ "${Le_OCSP_Staple}" = "1" ]; then
    _info "Updating OCSP stapling info"
    _debug _ocsp "${_ocsp}"
    _info "Extracting OCSP URL"
    _ocsp_url=$(${ACME_OPENSSL_BIN:-openssl} x509 -noout -ocsp_uri -in "${_pem}")
    _debug _ocsp_url "${_ocsp_url}"

    # Only process OCSP if URL was present
    if [ "${_ocsp_url}" != "" ]; then
      # Extract the hostname from the OCSP URL
      _info "Extracting OCSP host"
      _ocsp_host=$(echo "${_ocsp_url}" | cut -d/ -f3)
      _debug _ocsp_host "${_ocsp_host}"

      # Only process the certificate if we have a .issuer file
      if [ -r "${_issuer}" ]; then
        # Check if issuer cert is also a root CA cert
        _subjectdn=$(${ACME_OPENSSL_BIN:-openssl} x509 -in "${_issuer}" -subject -noout | cut -d'/' -f2,3,4,5,6,7,8,9,10)
        _debug _subjectdn "${_subjectdn}"
        _issuerdn=$(${ACME_OPENSSL_BIN:-openssl} x509 -in "${_issuer}" -issuer -noout | cut -d'/' -f2,3,4,5,6,7,8,9,10)
        _debug _issuerdn "${_issuerdn}"
        _info "Requesting OCSP response"
        # If the issuer is a CA cert then our command line has "-CAfile" added
        if [ "${_subjectdn}" = "${_issuerdn}" ]; then
          _cafile_argument="-CAfile \"${_issuer}\""
        else
          _cafile_argument=""
        fi
        _debug _cafile_argument "${_cafile_argument}"
        # if OpenSSL/LibreSSL is v1.1 or above, the format for the -header option has changed
        _openssl_version=$(${ACME_OPENSSL_BIN:-openssl} version | cut -d' ' -f2)
        _debug _openssl_version "${_openssl_version}"
        _openssl_major=$(echo "${_openssl_version}" | cut -d '.' -f1)
        _openssl_minor=$(echo "${_openssl_version}" | cut -d '.' -f2)
        if [ "${_openssl_major}" -eq "1" ] && [ "${_openssl_minor}" -ge "1" ] || [ "${_openssl_major}" -ge "2" ]; then
          _header_sep="="
        else
          _header_sep=" "
        fi
        # Request the OCSP response from the issuer and store it
        _openssl_ocsp_cmd="${ACME_OPENSSL_BIN:-openssl} ocsp \
          -issuer \"${_issuer}\" \
          -cert \"${_pem}\" \
          -url \"${_ocsp_url}\" \
          -header Host${_header_sep}\"${_ocsp_host}\" \
          -respout \"${_ocsp}\" \
          -verify_other \"${_issuer}\" \
          ${_cafile_argument} \
          | grep -q \"${_pem}: good\""
        _debug _openssl_ocsp_cmd "${_openssl_ocsp_cmd}"
        eval "${_openssl_ocsp_cmd}"
        _ret=$?
      else
        # Non fatal: No issuer file was present so no OCSP stapling file created
        _err "OCSP stapling in use but no .issuer file was present"
      fi
    else
      # Non fatal: No OCSP URL was found in the certificate
      _err "OCSP update requested but no OCSP URL was found in certificate"
    fi

    # Non fatal: Check return code of openssl command
    if [ "${_ret}" != "0" ]; then
      _err "Updating OCSP stapling failed with return code ${_ret}"
    fi
  else
    # An OCSP file was already present but certificate did not have OCSP extension
    if [ -f "${_ocsp}" ]; then
      _err "OCSP was not requested but .ocsp file exists."
      # Could remove the file at this step, although HAProxy just ignores it in this case
      # rm -f "${_ocsp}" || _err "Problem removing stale .ocsp file"
    fi
  fi

  if [ "${Le_Deploy_haproxy_hot_update}" = "yes" ]; then
    # set the socket name for messages
    if [ -n "${_cmdpfx}" ]; then
      _socketname="master CLI"
    else
      _socketname="stats socket"
    fi

    # Update certificate over HAProxy stats socket or master CLI.
    if _exists socat; then
      # look for the certificate on the stats socket, to choose between updating and creating one
      _socat_cert_cmd="echo '${_cmdpfx}show ssl cert' | socat '${_statssock}' - | grep -q '^${_pem}$'"
      _debug _socat_cert_cmd "${_socat_cert_cmd}"
      eval "${_socat_cert_cmd}"
      _ret=$?
      if [ "${_ret}" != "0" ]; then
        _newcert="1"
        _info "Creating new certificate '${_pem}' over HAProxy ${_socketname}."
        # The certificate wasn't found, so it's a new one. Check that the crt-list exists before creating and inserting the certificate.
        _socat_crtlist_show_cmd="echo '${_cmdpfx}show ssl crt-list' | socat '${_statssock}' - | grep -q '^${Le_Deploy_haproxy_pem_path}$'"
        _debug _socat_crtlist_show_cmd "${_socat_crtlist_show_cmd}"
        eval "${_socat_crtlist_show_cmd}"
        _ret=$?
        if [ "${_ret}" != "0" ]; then
          _err "Couldn't find '${Le_Deploy_haproxy_pem_path}' in haproxy 'show ssl crt-list'"
          return "${_ret}"
        fi
        # create a new certificate
        _socat_new_cmd="echo '${_cmdpfx}new ssl cert ${_pem}' | socat '${_statssock}' - | grep -q 'New empty'"
        _debug _socat_new_cmd "${_socat_new_cmd}"
        eval "${_socat_new_cmd}"
        _ret=$?
        if [ "${_ret}" != "0" ]; then
          _err "Couldn't create '${_pem}' in haproxy"
          return "${_ret}"
        fi
      else
        _info "Update existing certificate '${_pem}' over HAProxy ${_socketname}."
      fi
      _socat_cert_set_cmd="echo -e '${_cmdpfx}set ssl cert ${_pem} <<\n$(cat "${_pem}")\n' | socat '${_statssock}' - | grep -q 'Transaction created'"
      _debug _socat_cert_set_cmd "${_socat_cert_set_cmd}"
      eval "${_socat_cert_set_cmd}"
      _ret=$?
      if [ "${_ret}" != "0" ]; then
        _err "Can't update '${_pem}' in haproxy"
        return "${_ret}"
      fi
      _socat_cert_commit_cmd="echo '${_cmdpfx}commit ssl cert ${_pem}' | socat '${_statssock}' - | grep -q '^Success!$'"
      _debug _socat_cert_commit_cmd "${_socat_cert_commit_cmd}"
      eval "${_socat_cert_commit_cmd}"
      _ret=$?
      if [ "${_ret}" != "0" ]; then
        _err "Can't commit '${_pem}' in haproxy"
        return ${_ret}
      fi
      if [ "${_newcert}" = "1" ]; then
        # if this is a new certificate, it needs to be inserted into the crt-list
        _socat_cert_add_cmd="echo '${_cmdpfx}add ssl crt-list ${Le_Deploy_haproxy_pem_path} ${_pem}' | socat '${_statssock}' - | grep -q 'Success!'"
        _debug _socat_cert_add_cmd "${_socat_cert_add_cmd}"
        eval "${_socat_cert_add_cmd}"
        _ret=$?
        if [ "${_ret}" != "0" ]; then
          _err "Can't update '${_pem}' in haproxy"
          return "${_ret}"
        fi
      fi
    else
      _err "'socat' is not available, couldn't update over ${_socketname}"
    fi
  else
    # Reload HAProxy
    _debug _reload "${_reload}"
    eval "${_reload}"
    _ret=$?
    if [ "${_ret}" != "0" ]; then
      _err "Error code ${_ret} during reload"
      return ${_ret}
    else
      _info "Reload successful"
    fi
  fi

  return 0
}
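
For reference, the hot-update path above drives HAProxy's runtime API
("new ssl cert", "set ssl cert", "commit ssl cert", "add ssl crt-list").
A quick manual check of a deployed certificate, assuming the script's
default stats socket and an illustrative PEM path, looks like:

    echo "show ssl cert" | socat UNIX:/run/haproxy/admin.sock -
    echo "show ssl cert /etc/haproxy/example.com.pem" | socat UNIX:/run/haproxy/admin.sock -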
@ -1,235 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Dump certificates from the HAProxy stats or master socket to the filesystem
|
|
||||||
# Experimental script
|
|
||||||
#
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export BASEPATH=${BASEPATH:-/etc/haproxy}/
|
|
||||||
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
|
|
||||||
export DRY_RUN=0
|
|
||||||
export DEBUG=
|
|
||||||
export VERBOSE=
|
|
||||||
export M="@1 "
|
|
||||||
export TMP

vecho() {
    [ -n "$VERBOSE" ] && echo "$@"
    return 0
}

read_certificate() {
    name=$1
    crt_filename=
    key_filename=

    OFS=$IFS
    IFS=":"

    while read -r key value; do
        case "$key" in
            "Crt filename")
                crt_filename="${value# }"
                key_filename="${value# }"
                ;;
            "Key filename")
                key_filename="${value# }"
                ;;
        esac
    done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
    IFS=$OFS

    if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
        return 1
    fi

    # handle fields without a crt-base/key-base
    [ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
    [ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"

    vecho "name:$name"
    vecho "crt:$crt_filename"
    vecho "key:$key_filename"

    export NAME="$name"
    export CRT_FILENAME="$crt_filename"
    export KEY_FILENAME="$key_filename"

    return 0
}

cmp_certkey() {
    prev=$1
    new=$2

    if [ ! -f "$prev" ]; then
        return 1
    fi

    if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
        return 1
    fi

    return 0
}

dump_certificate() {
    name=$1
    prev_crt=$2
    prev_key=$3
    r="tmp.${RANDOM}"
    d="old.$(date +%s)"
    new_crt="$TMP/$(basename "$prev_crt").${r}"
    new_key="$TMP/$(basename "$prev_key").${r}"

    if ! touch "${new_crt}" || ! touch "${new_key}"; then
        echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
        return 1
    fi

    echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
    # use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
    echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"

    if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
        echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
        return 1
    fi

    if cmp_certkey "${prev_crt}" "${new_crt}"; then
        echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
        return 0
    fi

    # dry run will just return before trying to move the files
    if [ "${DRY_RUN}" != "0" ]; then
        return 0
    fi

    # move the current certificates to ".old.timestamp"
    if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
        mv "${prev_crt}" "${prev_crt}.${d}"
        [ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
    fi

    # move the new certificates to the old place
    mv "${new_crt}" "${prev_crt}"
    [ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"

    return 0
}

dump_all_certificates() {
    echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
        export NAME
        export CRT_FILENAME
        export KEY_FILENAME

        if read_certificate "$line"; then
            dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
        else
            echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
        fi
    done
}

usage() {
    echo "Usage:"
    echo " $0 [options]* [cert]*"
    echo ""
echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
|
|
||||||
echo " Require socat and openssl"
|
|
||||||
echo " EXPERIMENTAL script, backup your files!"
|
|
||||||
echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
|
|
||||||
echo " -s, --socket <path> Use the stats socket at <path>"
|
|
||||||
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
|
|
||||||
echo " -n, --dry-run Read certificates on the socket but don't dump them"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -v, --verbose Verbose mode"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo " -- End of options"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
|
|
||||||
}
|
|
||||||
|
|
||||||
main() {
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
case "$1" in
|
|
||||||
-S|--master-socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M="@1 "
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-s|--socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M=
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-p|--path)
|
|
||||||
BASEPATH="$2/"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-n|--dry-run)
|
|
||||||
DRY_RUN=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-d|--debug)
|
|
||||||
DEBUG=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-v|--verbose)
|
|
||||||
VERBOSE=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-h|--help)
|
|
||||||
usage "$@"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
--)
|
|
||||||
shift
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
-*)
|
|
||||||
echo "[ALERT] ($$) : Unknown option '$1'" >&2
|
|
||||||
usage "$@"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -n "$DEBUG" ]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
|
|
||||||
TMP=${TMP:-$(mktemp -d)}
|
|
||||||
|
|
||||||
if [ -z "$1" ]; then
|
|
||||||
dump_all_certificates
|
|
||||||
else
|
|
||||||
# compute the certificates names at the end of the command
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
if ! read_certificate "$1"; then
|
|
||||||
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
|
|
||||||
shift
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
trap 'rm -rf -- "$TMP"' EXIT
|
|
||||||
main "$@"
|
|
||||||
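For reference, read_certificate() above keys off the "Crt filename" and "Key filename" fields of the CLI's "show ssl cert <name>" output. A hedged sketch of the two lines it expects (the paths are illustrative; real output carries more fields):

    Crt filename: /etc/haproxy/ssl/foo.com.pem
    Key filename: /etc/haproxy/ssl/foo.com.pem.key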
@ -1,113 +0,0 @@
#!/bin/bash

set -e

export VERBOSE=1
export TIMEOUT=90
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
export RET=

alert() {
    if [ "$VERBOSE" -ge "1" ]; then
        echo "[ALERT] $*" >&2
    fi
}

reload() {
    while read -r line; do
        if [ "$line" = "Success=0" ]; then
            RET=1
        elif [ "$line" = "Success=1" ]; then
            RET=0
        elif [ "$line" = "Another reload is still in progress." ]; then
            alert "$line"
        elif [ "$line" = "--" ]; then
            continue
        else
            if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
                echo "$line" >&2
            elif [ "$VERBOSE" = "3" ]; then
                echo "$line" >&2
            fi
        fi
    done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)

    if [ -z "$RET" ]; then
        alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
        return 1
    fi

    return "$RET"
}

usage() {
    echo "Usage:"
    echo " $0 [options]*"
    echo ""
echo " Trigger a reload from the master socket"
|
|
||||||
echo " Require socat"
|
|
||||||
echo " EXPERIMENTAL script!"
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
|
|
||||||
echo " -s, --silent Silent mode (no output)"
|
|
||||||
echo " -v, --verbose Verbose output (output from haproxy on failure)"
|
|
||||||
echo " -vv Even more verbose output (output from haproxy on success and failure)"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -S ${MASTER_SOCKET} -d ${TIMEOUT}"
|
|
||||||
}
|
|
||||||

main() {
    while [ -n "$1" ]; do
        case "$1" in
            -S|--master-socket)
                MASTER_SOCKET="$2"
                shift 2
                ;;
            -t|--timeout)
                TIMEOUT="$2"
                shift 2
                ;;
            -s|--silent)
                VERBOSE=0
                shift
                ;;
            -v|--verbose)
                VERBOSE=2
                shift
                ;;
            -vv)
                VERBOSE=3
                shift
                ;;
            -d|--debug)
                DEBUG=1
                shift
                ;;
            -h|--help)
                usage "$@"
                exit 0
                ;;
            *)
                echo "[ALERT] ($$) : Unknown option '$1'" >&2
                usage "$@"
                exit 1
                ;;
        esac
    done

    if [ -n "$DEBUG" ]; then
        set -x
    fi
}

main "$@"
reload
@ -123,22 +123,6 @@ struct url_stat {
 #define FILT2_PRESERVE_QUERY 0x02
 #define FILT2_EXTRACT_CAPTURE 0x04

-#define FILT_OUTPUT_FMT (FILT_COUNT_ONLY| \
-                         FILT_COUNT_STATUS| \
-                         FILT_COUNT_SRV_STATUS| \
-                         FILT_COUNT_COOK_CODES| \
-                         FILT_COUNT_TERM_CODES| \
-                         FILT_COUNT_URL_ONLY| \
-                         FILT_COUNT_URL_COUNT| \
-                         FILT_COUNT_URL_ERR| \
-                         FILT_COUNT_URL_TAVG| \
-                         FILT_COUNT_URL_TTOT| \
-                         FILT_COUNT_URL_TAVGO| \
-                         FILT_COUNT_URL_TTOTO| \
-                         FILT_COUNT_URL_BAVG| \
-                         FILT_COUNT_URL_BTOT| \
-                         FILT_COUNT_IP_COUNT)
-
 unsigned int filter = 0;
 unsigned int filter2 = 0;
 unsigned int filter_invert = 0;

@ -208,7 +192,7 @@ void help()
" you can also use -n to start from earlier then field %d\n"
|
" you can also use -n to start from earlier then field %d\n"
|
||||||
" -query preserve the query string for per-URL (-u*) statistics\n"
|
" -query preserve the query string for per-URL (-u*) statistics\n"
|
||||||
"\n"
|
"\n"
|
||||||
"Output format - **only one** may be used at a time\n"
|
"Output format - only one may be used at a time\n"
|
||||||
" -c only report the number of lines that would have been printed\n"
|
" -c only report the number of lines that would have been printed\n"
|
||||||
" -pct output connect and response times percentiles\n"
|
" -pct output connect and response times percentiles\n"
|
||||||
" -st output number of requests per HTTP status code\n"
|
" -st output number of requests per HTTP status code\n"
|
||||||
@ -426,7 +410,7 @@ struct timer *insert_timer(struct eb_root *r, struct timer **alloc, int v)
|
|||||||
struct eb32_node *n;
|
struct eb32_node *n;
|
||||||
|
|
||||||
if (!t) {
|
if (!t) {
|
||||||
t = calloc(1, sizeof(*t));
|
t = calloc(sizeof(*t), 1);
|
||||||
if (unlikely(!t)) {
|
if (unlikely(!t)) {
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
||||||
exit(1);
|
exit(1);
|
||||||
@ -454,7 +438,7 @@ struct timer *insert_value(struct eb_root *r, struct timer **alloc, int v)
|
|||||||
struct eb32_node *n;
|
struct eb32_node *n;
|
||||||
|
|
||||||
if (!t) {
|
if (!t) {
|
||||||
t = calloc(1, sizeof(*t));
|
t = calloc(sizeof(*t), 1);
|
||||||
if (unlikely(!t)) {
|
if (unlikely(!t)) {
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
||||||
exit(1);
|
exit(1);
|
||||||
@ -914,9 +898,6 @@ int main(int argc, char **argv)
|
|||||||
if (!filter && !filter2)
|
if (!filter && !filter2)
|
||||||
die("No action specified.\n");
|
die("No action specified.\n");
|
||||||
|
|
||||||
if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
|
|
||||||
die("Please, set only one output filter.\n");
|
|
||||||
|
|
||||||
if (filter & FILT_ACC_COUNT && !filter_acc_count)
|
if (filter & FILT_ACC_COUNT && !filter_acc_count)
|
||||||
filter_acc_count=1;
|
filter_acc_count=1;
|
||||||
|
|
||||||
@ -1571,10 +1552,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
|
|||||||
if (!srv_node) {
|
if (!srv_node) {
|
||||||
/* server not yet in the tree, let's create it */
|
/* server not yet in the tree, let's create it */
|
||||||
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
||||||
if (unlikely(!srv)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
srv_node = &srv->node;
|
srv_node = &srv->node;
|
||||||
memcpy(&srv_node->key, b, e - b);
|
memcpy(&srv_node->key, b, e - b);
|
||||||
srv_node->key[e - b] = '\0';
|
srv_node->key[e - b] = '\0';
|
||||||
@ -1684,10 +1661,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
|
|||||||
*/
|
*/
|
||||||
if (unlikely(!ustat))
|
if (unlikely(!ustat))
|
||||||
ustat = calloc(1, sizeof(*ustat));
|
ustat = calloc(1, sizeof(*ustat));
|
||||||
if (unlikely(!ustat)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
ustat->nb_err = err;
|
ustat->nb_err = err;
|
||||||
ustat->nb_req = 1;
|
ustat->nb_req = 1;
|
||||||
|
|||||||
@ -7,21 +7,6 @@ the queue.
 ## Requirements
 - Python 3.x
 - [lxml](https://lxml.de/installation.html)
-- requests
-- urllib3
-
-## Installation
-It can easily be installed in a venv from Python 3:
-
-    $ python3 -m venv ~/.local/venvs/stable-bot/
-    $ source ~/.local/venvs/stable-bot/bin/activate
-    $ pip install -r requirements.txt
-
-And can be executed with:
-
-    $ ~/.local/venvs/stable-bot/bin/python release-estimator.py
-
 ## Usage
@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/python3
 #
 # Release estimator for HAProxy
 #

@ -16,7 +16,6 @@
 #

 from lxml import html
-from urllib.parse import urljoin
 import requests
 import traceback
 import smtplib

@ -191,7 +190,6 @@ This is a friendly bot that watches fixes pending for the next haproxy-stable re

 # parse out the CHANGELOG link
 CHANGELOG = tree.xpath('//a[contains(@href,"CHANGELOG")]/@href')[0]
-CHANGELOG = urljoin("https://", CHANGELOG)

 last_version = tree.xpath('//td[contains(text(), "last")]/../td/a/text()')[0]
 first_version = "%s.0" % (version)

@ -1,3 +0,0 @@
lxml
requests
urllib3
@ -6,9 +6,9 @@ Wants=network-online.target
 [Service]
 EnvironmentFile=-/etc/default/haproxy
 EnvironmentFile=-/etc/sysconfig/haproxy
-Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
+Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
-ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
+ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
-ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
+ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
 ExecReload=/bin/kill -USR2 $MAINPID
 KillMode=mixed
 Restart=always
@ -1,34 +0,0 @@
// find calls to calloc
@call@
expression ptr;
position p;
@@

ptr@p = calloc(...);

// find ok calls to calloc
@ok@
expression ptr;
position call.p;
@@

ptr@p = calloc(...);
... when != ptr
(
 (ptr == NULL || ...)
|
 (ptr == 0 || ...)
|
 (ptr != NULL || ...)
|
 (ptr != 0 || ...)
)

// fix bad calls to calloc
@depends on !ok@
expression ptr;
position call.p;
@@

ptr@p = calloc(...);
+ if (ptr == NULL) return;

@ -1,34 +0,0 @@
// find calls to malloc
@call@
expression ptr;
position p;
@@

ptr@p = malloc(...);

// find ok calls to malloc
@ok@
expression ptr;
position call.p;
@@

ptr@p = malloc(...);
... when != ptr
(
 (ptr == NULL || ...)
|
 (ptr == 0 || ...)
|
 (ptr != NULL || ...)
|
 (ptr != 0 || ...)
)

// fix bad calls to malloc
@depends on !ok@
expression ptr;
position call.p;
@@

ptr@p = malloc(...);
+ if (ptr == NULL) return;

@ -1,34 +0,0 @@
// find calls to strdup
@call@
expression ptr;
position p;
@@

ptr@p = strdup(...);

// find ok calls to strdup
@ok@
expression ptr;
position call.p;
@@

ptr@p = strdup(...);
... when != ptr
(
 (ptr == NULL || ...)
|
 (ptr == 0 || ...)
|
 (ptr != NULL || ...)
|
 (ptr != 0 || ...)
)

// fix bad calls to strdup
@depends on !ok@
expression ptr;
position call.p;
@@

ptr@p = strdup(...);
+ if (ptr == NULL) return;
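The three semantic patches above share one shape: flag any calloc/malloc/strdup call whose result is never compared against NULL, then splice in a check after it. A hedged usage sketch with Coccinelle's spatch (the .cocci path and output file name are illustrative):

    spatch --sp-file dev/coccinelle/calloc.cocci --dir src > unchecked-calloc.diff

The `+ if (ptr == NULL) return;` line is only a placeholder fix; the emitted diff is meant to be reviewed so each site gets an error path appropriate to its function.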
@ -4,7 +4,6 @@
 /* make the include files below expose their flags */
 #define HA_EXPOSE_FLAGS

-#include <haproxy/applet-t.h>
 #include <haproxy/channel-t.h>
 #include <haproxy/connection-t.h>
 #include <haproxy/fd-t.h>

@ -13,10 +12,6 @@
 #include <haproxy/mux_fcgi-t.h>
 #include <haproxy/mux_h2-t.h>
 #include <haproxy/mux_h1-t.h>
-#include <haproxy/mux_quic-t.h>
-#include <haproxy/mux_spop-t.h>
-#include <haproxy/peers-t.h>
-#include <haproxy/quic_conn-t.h>
 #include <haproxy/stconn-t.h>
 #include <haproxy/stream-t.h>
 #include <haproxy/task-t.h>

@ -41,19 +36,10 @@
 #define SHOW_AS_H1S    0x00010000
 #define SHOW_AS_FCONN  0x00020000
 #define SHOW_AS_FSTRM  0x00040000
-#define SHOW_AS_PEERS  0x00080000
-#define SHOW_AS_PEER   0x00100000
-#define SHOW_AS_QC     0x00200000
-#define SHOW_AS_SPOPC  0x00400000
-#define SHOW_AS_SPOPS  0x00800000
-#define SHOW_AS_QCC    0x01000000
-#define SHOW_AS_QCS    0x02000000
-#define SHOW_AS_APPCTX 0x04000000

 // command line names, must be in exact same order as the SHOW_AS_* flags above
 // so that show_as_words[i] matches flag 1U<<i.
-const char *show_as_words[] = { "ana", "chn", "conn", "sc", "stet", "strm", "task", "txn", "sd", "hsl", "htx", "hmsg", "fd", "h2c", "h2s", "h1c", "h1s", "fconn", "fstrm",
-                                "peers", "peer", "qc", "spopc", "spops", "qcc", "qcs", "appctx"};
+const char *show_as_words[] = { "ana", "chn", "conn", "sc", "stet", "strm", "task", "txn", "sd", "hsl", "htx", "hmsg", "fd", "h2c", "h2s", "h1c", "h1s", "fconn", "fstrm"};

 /* will be sufficient for even largest flag names */
 static char buf[4096];

@ -166,14 +152,6 @@ int main(int argc, char **argv)
 if (show_as & SHOW_AS_H1S)   printf("h1s->flags = %s\n",  (h1s_show_flags  (buf, bsz, " | ", flags), buf));
 if (show_as & SHOW_AS_FCONN) printf("fconn->flags = %s\n",(fconn_show_flags (buf, bsz, " | ", flags), buf));
 if (show_as & SHOW_AS_FSTRM) printf("fstrm->flags = %s\n",(fstrm_show_flags (buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_PEERS) printf("peers->flags = %s\n",(peers_show_flags (buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_PEER)  printf("peer->flags = %s\n", (peer_show_flags  (buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_QC)    printf("qc->flags = %s\n",   (qc_show_flags   (buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_SPOPC) printf("spopc->flags = %s\n",(spop_conn_show_flags(buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_SPOPS) printf("spops->flags = %s\n",(spop_strm_show_flags(buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_QCC)   printf("qcc->flags = %s\n",  (qcc_show_flags  (buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_QCS)   printf("qcs->flags = %s\n",  (qcs_show_flags  (buf, bsz, " | ", flags), buf));
-if (show_as & SHOW_AS_APPCTX) printf("appctx->flags = %s\n", (appctx_show_flags(buf, bsz, " | ", flags), buf));
 }
 return 0;
 }
@ -1,2 +1,2 @@
 #!/bin/sh
-grep -o 'cflg=[0-9a-fx]*' | sort | uniq -c | sort -nr | while read a b; do c=${b##*=}; d=$(${0%/*}/flags conn $c);d=${d##*= }; printf "%6d %s %s\n" $a "$b" "$d";done
+awk '{print $12}' | grep cflg= | sort | uniq -c | sort -nr | while read a b; do c=${b##*=}; d=$(${0%/*}/flags conn $c);d=${d##*= }; printf "%6d %s %s\n" $a "$b" "$d";done

@ -195,7 +195,7 @@ while read -r; do
 ! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]] || append_flag b.h2c.flg h2c "${BASH_REMATCH[1]}"
 elif [ $ctx = cob ]; then
 ! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]] || append_flag b.co.flg conn "${BASH_REMATCH[1]}"
-! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd 0x"${BASH_REMATCH[1]}"
+! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd "${BASH_REMATCH[1]}"
 elif [ $ctx = res ]; then
 ! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]] || append_flag res.flg chn "${BASH_REMATCH[1]}"
 ! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]] || append_flag res.ana ana "${BASH_REMATCH[1]}"
@ -1,118 +0,0 @@
# sets $tag and $node from $arg0, for internal use only
define _ebtree_set_tag_node
    set $tag = (unsigned long)$arg0 & 0x1
    set $node = (unsigned long)$arg0 & 0xfffffffffffffffe
    set $node = (struct eb_node *)$node
end

# get root from any node (leaf or node), returns in $node
define ebtree_root
    set $node = (struct eb_root *)$arg0->node_p
    if $node == 0
        # sole node
        set $node = (struct eb_root *)$arg0->leaf_p
    end
    # walk up
    while 1
        _ebtree_set_tag_node $node
        if $node->branches.b[1] == 0
            break
        end
        set $node = $node->node_p
    end
    # root returned in $node
end

# returns $node filled with the first node of ebroot $arg0
define ebtree_first
    # browse ebtree left until encountering leaf
    set $node = (struct eb_node *)$arg0->b[0]
    while 1
        _ebtree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->branches.b[0]
    end
    # extract last node
    _ebtree_set_tag_node $node
end

# finds next ebtree node after $arg0, and returns it in $node
define ebtree_next
    # get parent
    set $node = (struct eb_root *)$arg0->leaf_p
    # Walking up from right branch, so we cannot be below root
    # while (eb_gettag(t) != EB_LEFT) // #define EB_LEFT 0
    while 1
        _ebtree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->node_p
    end
    set $node = (struct eb_root *)$node->branches.b[1]
    # walk down (left side => 0)
    # while (eb_gettag(start) == EB_NODE) // #define EB_NODE 1
    while 1
        _ebtree_set_tag_node $node
        if $node == 0
            loop_break
        end
        if $tag != 1
            loop_break
        end
        set $node = (struct eb_root *)$node->branches.b[0]
    end
end


# sets $tag and $node from $arg0, for internal use only
define _ebsctree_set_tag_node
    set $tag = (unsigned long)$arg0 & 0x1
    set $node = (unsigned long)$arg0 & 0xfffffffffffffffe
    set $node = (struct eb32sc_node *)$node
end

# returns $node filled with the first node of ebroot $arg0
define ebsctree_first
    # browse ebsctree left until encountering leaf
    set $node = (struct eb32sc_node *)$arg0->b[0]
    while 1
        _ebsctree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->branches.b[0]
    end
    # extract last node
    _ebsctree_set_tag_node $node
end

# finds next ebtree node after $arg0, and returns it in $node
define ebsctree_next
    # get parent
    set $node = (struct eb_root *)$arg0->node.leaf_p
    # Walking up from right branch, so we cannot be below root
    # while (eb_gettag(t) != EB_LEFT) // #define EB_LEFT 0
    while 1
        _ebsctree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->node.node_p
    end
    set $node = (struct eb_root *)$node->node.branches.b[1]
    # walk down (left side => 0)
    # while (eb_gettag(start) == EB_NODE) // #define EB_NODE 1
    while 1
        _ebsctree_set_tag_node $node
        if $node == 0
            loop_break
        end
        if $tag != 1
            loop_break
        end
        set $node = (struct eb_root *)$node->node.branches.b[0]
    end
end
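These macros communicate through the gdb convenience variables $node and $tag rather than return values, so a tree walk chains them. A hedged example session (the root expression is illustrative; any struct eb_root works):

    (gdb) ebtree_first &some_tree_root
    (gdb) print *$node
    (gdb) ebtree_next $node
    (gdb) print *$node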
@ -1,26 +0,0 @@
# lists entries starting at list head $arg0
define list_dump
    set $h = $arg0
    set $p = *(void **)$h
    while ($p != $h)
        printf "%#lx\n", $p
        if ($p == 0)
            loop_break
        end
        set $p = *(void **)$p
    end
end

# list all entries starting at list head $arg0 until meeting $arg1
define list_find
    set $h = $arg0
    set $k = $arg1
    set $p = *(void **)$h
    while ($p != $h)
        printf "%#lx\n", $p
        if ($p == 0 || $p == $k)
            loop_break
        end
        set $p = *(void **)$p
    end
end
@ -1,19 +0,0 @@
# show non-null memprofile entries with method, alloc/free counts/tot and caller

define memprof_dump
    set $i = 0
    set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
    while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
        if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
            set $m = memprof_stats[$i].method
            printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
                memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
                memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
            output/a memprof_stats[$i].caller
            printf "\n"
        end
        set $i = $i + 1
    end
end
@ -1,21 +0,0 @@
# dump pool contents (2.9 and above, with buckets)
define pools_dump
    set $h = $po
    set $p = *(void **)$h
    while ($p != $h)
        set $e = (struct pool_head *)(((char *)$p) - (unsigned long)&((struct pool_head *)0)->list)

        set $total = 0
        set $used = 0
        set $idx = 0
        while $idx < sizeof($e->buckets) / sizeof($e->buckets[0])
            set $total=$total + $e->buckets[$idx].allocated
            set $used=$used + $e->buckets[$idx].used
            set $idx=$idx + 1
        end

        set $mem = $total * $e->size
        printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%u\n", $p, $e, $e->name, $e->size, $total, $used, $mem
        set $p = *(void **)$p
    end
end
@ -1,47 +0,0 @@
# This script will set the post_mortem struct pointer ($pm) from the one found
# in the "post_mortem" symbol. If not found or if not correct, it's the same
# address as the "_post_mortem" section, which can be found using "info files"
# or "objdump -h" on the executable. The guessed value is set by a first call
# to pm_init, but if not correct, you just need to call pm_init again with the
# correct pointer, e.g:
#    pm_init 0xcfd400

define pm_init
    set $pm = (struct post_mortem*)$arg0
    set $g = $pm.global
    set $ti = $pm.thread_info
    set $tc = $pm.thread_ctx
    set $tgi = $pm.tgroup_info
    set $tgc = $pm.tgroup_ctx
    set $fd = $pm.fdtab
    set $pxh = *$pm.proxies
    set $po = $pm.pools
    set $ac = $pm.activity
end

# show basic info on the running process (OS, uid, etc)
define pm_show_info
    print $pm->platform
    print $pm->process
end

# show thread IDs to easily map between gdb threads and tid
define pm_show_threads
    set $t = 0
    while $t < $g.nbthread
        printf "Tid %4d: pthread_id=%#lx stack_top=%#lx\n", $t, $ti[$t].pth_id, $ti[$t].stack_top
        set $t = $t + 1
    end
end

# dump all threads' dump buffers
define pm_show_thread_dump
    set $t = 0
    while $t < $g.nbthread
        printf "%s\n", $tc[$t].thread_dump_buffer->area
        set $t = $t + 1
    end
end

# initialize the various pointers
pm_init &post_mortem
@ -1,25 +0,0 @@
# list proxies starting with the one in argument (typically $pxh)
define px_list
    set $p = (struct proxy *)$arg0
    while ($p != 0)
        printf "%p (", $p
        if $p->cap & 0x10
            printf "LB,"
        end
        if $p->cap & 0x1
            printf "FE,"
        end
        if $p->cap & 0x2
            printf "BE,"
        end
        printf "%s)", $p->id
        if $p->cap & 0x1
            printf " feconn=%u cmax=%u cum_conn=%llu cpsmax=%u", $p->feconn, $p->fe_counters.conn_max, $p->fe_counters.cum_conn, $p->fe_counters.cps_max
        end
        if $p->cap & 0x2
            printf " beconn=%u served=%u queued=%u qmax=%u cum_sess=%llu wact=%u", $p->beconn, $p->served, $p->queue.length, $p->be_counters.nbpend_max, $p->be_counters.cum_sess, $p->lbprm.tot_wact
        end
        printf "\n"
        set $p = ($p)->next
    end
end
@ -1,9 +0,0 @@
# list servers in a proxy whose pointer is passed in argument
define px_list_srv
    set $h = (struct proxy *)$arg0
    set $p = ($h)->srv
    while ($p != 0)
        printf "%#lx %s maxconn=%u cur_sess=%u max_sess=%u served=%u queued=%u st=%u->%u ew=%u sps_max=%u\n", $p, $p->id, $p->maxconn, $p->cur_sess, $p->counters.cur_sess_max, $p->served, $p->queue.length, $p->cur_state, $p->next_state, $p->cur_eweight, $p->counters.sps_max
        set $p = ($p)->next
    end
end
@ -1,18 +0,0 @@
# list all streams for all threads
define stream_dump
    set $t = 0
    while $t < $g.nbthread
        set $h = &$tc[$t].streams
        printf "Tid %4d: &streams=%p\n", $t, $h
        set $p = *(void **)$h
        while ($p != $h)
            set $s = (struct stream *)(((char *)$p) - (unsigned long)&((struct stream *)0)->list)
            printf "  &list=%#lx strm=%p uid=%u strm.fe=%s strm.flg=%#x strm.list={n=%p,p=%p}\n", $p, $s, $s->uniq_id, $s->sess->fe->id, $s->flags, $s->list.n, $s->list.p
            if ($p == 0)
                loop_break
            end
            set $p = *(void **)$p
        end
        set $t = $t + 1
    end
end
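All of the dump helpers above lean on the convenience variables ($g, $tc, $ti, $pxh, ...) that pm_init fills in, so a core-file session typically starts there. A hedged example (pm_init is re-run with the _post_mortem section address if the default guess is wrong):

    (gdb) pm_init &post_mortem
    (gdb) pm_show_threads
    (gdb) stream_dump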
@ -1,247 +0,0 @@
-- This is an HTTP/2 tracer for a TCP proxy. It will decode the frames that are
-- exchanged between the client and the server and indicate their direction,
-- types, flags and lengths. Lines are prefixed with a connection number modulo
-- 4096 that makes it possible to sort out multiplexed exchanges. In order to use this,
-- simply load this file in the global section and use it from a TCP proxy:
--
--    global
--        lua-load "dev/h2/h2-tracer.lua"
--
--    listen h2_sniffer
--        mode tcp
--        bind :8002
--        filter lua.h2-tracer #hex
--        server s1 127.0.0.1:8003
--

-- define the decoder's class here
Dec = {}
Dec.id = "Lua H2 tracer"
Dec.flags = 0
Dec.__index = Dec
Dec.args = {}  -- args passed by the filter's declaration
Dec.cid = 0    -- next connection ID

-- prefix to indent responses
res_pfx = " | "

-- H2 frame types
h2ft = {
    [0] = "DATA",
    [1] = "HEADERS",
    [2] = "PRIORITY",
    [3] = "RST_STREAM",
    [4] = "SETTINGS",
    [5] = "PUSH_PROMISE",
    [6] = "PING",
    [7] = "GOAWAY",
    [8] = "WINDOW_UPDATE",
    [9] = "CONTINUATION",
}

h2ff = {
    [0] = { [0] = "ES", [3] = "PADDED" }, -- data
    [1] = { [0] = "ES", [2] = "EH", [3] = "PADDED", [5] = "PRIORITY" }, -- headers
    [2] = { }, -- priority
    [3] = { }, -- rst_stream
    [4] = { [0] = "ACK" }, -- settings
    [5] = { [2] = "EH", [3] = "PADDED" }, -- push_promise
    [6] = { [0] = "ACK" }, -- ping
    [7] = { }, -- goaway
    [8] = { }, -- window_update
    [9] = { [2] = "EH" }, -- continuation
}

function Dec:new()
    local dec = {}

    setmetatable(dec, Dec)
    dec.do_hex = false
    if (Dec.args[1] == "hex") then
        dec.do_hex = true
    end

    Dec.cid = Dec.cid+1
    -- mix the thread number when multithreading.
    dec.cid = Dec.cid + 64 * core.thread

    -- state per dir. [1]=req [2]=res
    dec.st = {
        [1] = {
            hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
            fofs = 0,
            flen = 0,
            ftyp = 0,
            fflg = 0,
            sid = 0,
            tot = 0,
        },
        [2] = {
            hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
            fofs = 0,
            flen = 0,
            ftyp = 0,
            fflg = 0,
            sid = 0,
            tot = 0,
        },
    }
    return dec
end

function Dec:start_analyze(txn, chn)
    if chn:is_resp() then
        io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res start\n")
    else
        io.write(string.format("[%03x] ", self.cid % 4096) .. "### req start\n")
    end
    filter.register_data_filter(self, chn)
end

function Dec:end_analyze(txn, chn)
    if chn:is_resp() then
        io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res end: " .. self.st[2].tot .. " bytes total\n")
    else
        io.write(string.format("[%03x] ", self.cid % 4096) .. "### req end: " ..self.st[1].tot.. " bytes total\n")
    end
end

function Dec:tcp_payload(txn, chn)
    local data = { }
    local dofs = 1
    local pfx = ""
    local dir = 1
    local sofs = 0
    local ft = ""
    local ff = ""

    if chn:is_resp() then
        pfx = res_pfx
        dir = 2
    end

    pfx = string.format("[%03x] ", self.cid % 4096) .. pfx

    -- stream offset before processing
    sofs = self.st[dir].tot

    if (chn:input() > 0) then
        data = chn:data()
        self.st[dir].tot = self.st[dir].tot + chn:input()
    end

    if (chn:input() > 0 and self.do_hex ~= false) then
        io.write("\n" .. pfx .. "Hex:\n")
        for i = 1, #data do
            if ((i & 7) == 1) then io.write(pfx) end
            io.write(string.format("0x%02x ", data:sub(i, i):byte()))
            if ((i & 7) == 0 or i == #data) then io.write("\n") end
        end
    end

    -- start at byte 1 in the <data> string
    dofs = 1

    -- the first 24 bytes are expected to be an H2 preface on the request
    if (dir == 1 and sofs < 24) then
        -- let's not check it for now
        local bytes = self.st[dir].tot - sofs
        if (sofs + self.st[dir].tot >= 24) then
            -- skip what was missing from the preface
            dofs = dofs + 24 - sofs
            sofs = 24
            io.write(pfx .. "[PREFACE len=24]\n")
        else
            -- consume more preface bytes
            sofs = sofs + self.st[dir].tot
            return
        end
    end

    -- parse contents as long as there are pending data

    while true do
        -- check if we need to consume data from the current frame
        -- flen is the number of bytes left before the frame's end.
        if (self.st[dir].flen > 0) then
            if dofs > #data then return end -- missing data
            if (#data - dofs + 1 < self.st[dir].flen) then
                -- insufficient data
                self.st[dir].flen = self.st[dir].flen - (#data - dofs + 1)
                io.write(pfx .. string.format("%32s\n", "... -" .. (#data - dofs + 1) .. " = " .. self.st[dir].flen))
                dofs = #data + 1
                return
            else
                -- enough data to finish
                if (dofs == 1) then
                    -- only print a partial size if the frame was interrupted
                    io.write(pfx .. string.format("%32s\n", "... -" .. self.st[dir].flen .. " = 0"))
                end
                dofs = dofs + self.st[dir].flen
                self.st[dir].flen = 0
            end
        end

        -- here, flen = 0, we're at the beginning of a new frame --

        -- read possibly missing header bytes until dec.fofs == 9
        while self.st[dir].fofs < 9 do
            if dofs > #data then return end -- missing data
            self.st[dir].hdr[self.st[dir].fofs + 1] = data:sub(dofs, dofs):byte()
            dofs = dofs + 1
            self.st[dir].fofs = self.st[dir].fofs + 1
        end

        -- we have a full frame header here
        if (self.do_hex ~= false) then
            io.write("\n" .. pfx .. string.format("hdr=%02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
                self.st[dir].hdr[1], self.st[dir].hdr[2], self.st[dir].hdr[3],
                self.st[dir].hdr[4], self.st[dir].hdr[5], self.st[dir].hdr[6],
                self.st[dir].hdr[7], self.st[dir].hdr[8], self.st[dir].hdr[9]))
        end

        -- we have a full frame header, we'll be ready
        -- for a new frame once the data is gone
        self.st[dir].flen = self.st[dir].hdr[1] * 65536 +
                            self.st[dir].hdr[2] * 256 +
                            self.st[dir].hdr[3]
        self.st[dir].ftyp = self.st[dir].hdr[4]
        self.st[dir].fflg = self.st[dir].hdr[5]
        self.st[dir].sid = self.st[dir].hdr[6] * 16777216 +
                           self.st[dir].hdr[7] * 65536 +
                           self.st[dir].hdr[8] * 256 +
                           self.st[dir].hdr[9]
        self.st[dir].fofs = 0

        -- decode frame type
        if self.st[dir].ftyp <= 9 then
            ft = h2ft[self.st[dir].ftyp]
        else
            ft = string.format("TYPE_0x%02x\n", self.st[dir].ftyp)
        end

        -- decode frame flags for frame type <ftyp>
        ff = ""
        for i = 7, 0, -1 do
            if (((self.st[dir].fflg >> i) & 1) ~= 0) then
                if self.st[dir].ftyp <= 9 and h2ff[self.st[dir].ftyp][i] ~= nil then
                    ff = ff .. ((ff == "") and "" or "+")
                    ff = ff .. h2ff[self.st[dir].ftyp][i]
                else
                    ff = ff .. ((ff == "") and "" or "+")
                    ff = ff .. string.format("0x%02x", 1<<i)
                end
            end
        end

        io.write(pfx .. string.format("[%s %ssid=%u len=%u (bytes=%u)]\n",
            ft, (ff == "") and "" or ff .. " ",
            self.st[dir].sid, self.st[dir].flen,
            (#data - dofs + 1)))
    end
end

core.register_filter("h2-tracer", Dec, function(dec, args)
    Dec.args = args
    return dec
end)
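A quick way to exercise the tracer, matching the config sketch in the header comment (the port is illustrative, and an H2-capable server is assumed on 127.0.0.1:8003 since the proxy runs in TCP mode):

    curl --http2-prior-knowledge -s -o /dev/null http://127.0.0.1:8002/

Each frame then shows up on HAProxy's stdout as a line such as [SETTINGS sid=0 len=18 (bytes=27)] (values illustrative), with response-side lines indented by " | ".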
@ -4,13 +4,9 @@
 # All fields are optional. 0 assumed when absent.

 USAGE=\
-"Usage: %s [-l <len> ] [-t <type>] [-f <flags>[,...]] [-i <sid>] [ -d <data> ]
+"Usage: %s [-l <len> ] [-t <type>] [-f <flags>] [-i <sid>] [ -d <data> ] > hdr.bin
-   [ -e <name> <value> ]* [ -r|-R raw ] [ -h | --help ] > hdr.bin
 Numbers are decimal or 0xhex. Not set=0. If <data> is passed, it points
-to a file that is read and chunked into frames of <len> bytes. -e
+to a file that is read and chunked into frames of <len> bytes.
-encodes a headers frame (by default) with all headers at once encoded
-in literal. Use type 'p' for the preface. Use -r to pass raw data or
--R to pass raw hex codes (hex digit pairs, blanks ignored).

 Supported symbolic types (case insensitive prefix match):
   DATA (0x00)         PUSH_PROMISE (0x05)

@ -29,8 +25,6 @@ LEN=
 TYPE=
 FLAGS=
 ID=
-RAW=
-HDR=( )

 die() {
     [ "$#" -eq 0 ] || echo "$*" >&2

@ -54,7 +48,7 @@ mkframe() {
 local T="${2:-0}"
 local F="${3:-0}"
 local I="${4:-0}"
-local t f f2 f3
+local t f

 # get the first match in this order
 for t in DATA:0x00 HEADERS:0x01 RST_STREAM:0x03 SETTINGS:0x04 PING:0x06 \

@ -72,37 +66,17 @@ mkframe() {
     die
 fi

-# get the first match in this order, for each entry delimited by ','.
+# get the first match in this order
-# E.g.: "-f ES,EH"
+for f in ES:0x01 EH:0x04 PAD:0x08 PRIO:0x20; do
-f2=${F^^*}; F=0
+    if [ -z "${f##${F^^*}*}" ]; then
+        F="${f##*:}"
-while [ -n "$f2" ]; do
-    f3="${f2%%,*}"
-    tmp=""
-    for f in ES:0x01 EH:0x04 PAD:0x08 PRIO:0x20; do
-        if [ -n "$f3" -a -z "${f##${f3}*}" ]; then
-            tmp="${f#*:}"
-            break
-        fi
-    done
-
-    if [ -n "$tmp" ]; then
-        F=$(( F | tmp ))
-        f2="${f2#$f3}"
-        f2="${f2#,}"
-    elif [ -z "${f3##[X0-9A-F]*}" ]; then
-        F=$(( F | f3 ))
-        f2="${f2#$f3}"
-        f2="${f2#,}"
-    else
-        echo "Unknown flag(s) '$f3'" >&2
-        usage "${0##*}"
-        die
     fi
 done

-if [ -n "$f2" ]; then
+if [ -n "${F##[0-9]*}" ]; then
-    F="${f2} | ${F}"
+    echo "Unknown type '$T'" >&2
+    usage "${0##*}"
+    die
 fi

 L=$(( L )); T=$(( T )); F=$(( F )); I=$(( I ))

@ -136,9 +110,6 @@ while [ -n "$1" -a -z "${1##-*}" ]; do
 -f) FLAGS="$2" ; shift 2 ;;
 -i) ID="$2" ; shift 2 ;;
 -d) DATA="$2" ; shift 2 ;;
--r) RAW="$2" ; shift 2 ;;
--R) RAW="$(printf $(echo -n "${2// /}" | sed -e 's/\([^ ][^ ]\)/\\\\x\1/g'))" ; shift 2 ;;
--e) TYPE=1; HDR[${#HDR[@]}]="$2=$3"; shift 3 ;;
 -h|--help) usage "${0##*}"; quit;;
 *) usage "${0##*}"; die ;;
 esac

@ -164,35 +135,8 @@ if [ -n "${ID##[0-9]*}" ]; then
 die
 fi

-if [ "$TYPE" = "p" ]; then
+if [ -z "$DATA" ]; then
-    printf "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
-elif [ -z "$DATA" ]; then
-    # If we're trying to emit literal headers, let's pre-build the raw data
-    # and measure their total length.
-    if [ ${#HDR[@]} -gt 0 ]; then
-        # limited to 127 bytes for name and value
-        for h in "${HDR[@]}"; do
-            n=${h%%=*}
-            v=${h#*=}
-            nl=${#n}
-            vl=${#v}
-            nl7=$(printf "%02x" $((nl & 127)))
-            vl7=$(printf "%02x" $((vl & 127)))
-            RAW="${RAW}\x40\x${nl7}${n}\x${vl7}${v}"
-        done
-    fi
-
-    # compute length if RAW set
-    if [ -n "$RAW" ]; then
-        LEN=$(printf "${RAW}" | wc -c)
-    fi
-
     mkframe "$LEN" "$TYPE" "$FLAGS" "$ID"
-
-    # now emit the literal data of advertised length
-    if [ -n "$RAW" ]; then
-        printf "${RAW}"
-    fi
 else
     # read file $DATA in <LEN> chunks and send it in multiple frames
     # advertising their respective lengths.
@ -35,34 +35,12 @@
 #include <haproxy/api.h>
 #include <haproxy/buf.h>
-#include <haproxy/ring-t.h>
+#include <haproxy/ring.h>
-#include <haproxy/thread.h>

 int force = 0;   // force access to a different layout
 int lfremap = 0; // remap LF in traces
 int repair = 0;  // repair file

-struct ring_v1 {
-    struct buffer buf; // storage area
-};
-
-// ring v2 format (not aligned)
-struct ring_v2 {
-    size_t size;  // storage size
-    size_t rsvd;  // header length (used for file-backed maps)
-    size_t tail;  // storage tail
-    size_t head;  // storage head
-    char area[0]; // storage area begins immediately here
-};
-
-// ring v2 format (thread aligned)
-struct ring_v2a {
-    size_t size;              // storage size
-    size_t rsvd;              // header length (used for file-backed maps)
-    size_t tail ALIGNED(64);  // storage tail
-    size_t head ALIGNED(64);  // storage head
-    char area[0] ALIGNED(64); // storage area begins immediately here
-};
-
 /* display the message and exit with the code */
 __attribute__((noreturn)) void die(int code, const char *format, ...)

@ -91,21 +69,75 @@ __attribute__((noreturn)) void usage(int code, const char *arg0)
 "", arg0);
 }

-/* dump a ring represented in a pre-initialized buffer, starting from offset
+/* This function dumps all events from the ring whose pointer is in <p0> into
- * <ofs> and with flags <flags>
+ * the appctx's output buffer, and takes from <o0> the seek offset into the
+ * buffer's history (0 for oldest known event). It looks at <i0> for boolean
+ * options: bit0 means it must wait for new data or any key to be pressed. Bit1
+ * means it must seek directly to the end to wait for new contents. It returns
+ * 0 if the output buffer is full or events are missing and it needs to be
|
* called again, otherwise non-zero. It is meant to be used with
|
||||||
|
* cli_release_show_ring() to clean up.
|
||||||
*/
|
*/
|
||||||
int dump_ring_as_buf(struct buffer buf, size_t ofs, int flags)
|
int dump_ring(struct ring *ring, size_t ofs, int flags)
|
||||||
{
|
{
|
||||||
|
struct buffer buf;
|
||||||
uint64_t msg_len = 0;
|
uint64_t msg_len = 0;
|
||||||
size_t len, cnt;
|
size_t len, cnt;
|
||||||
const char *blk1 = NULL, *blk2 = NULL, *p;
|
const char *blk1 = NULL, *blk2 = NULL, *p;
|
||||||
size_t len1 = 0, len2 = 0, bl;
|
size_t len1 = 0, len2 = 0, bl;
|
||||||
|
|
||||||
|
/* Explanation: the storage area in the writing process starts after
|
||||||
|
* the end of the structure. Since the whole area is mmapped(), we know
|
||||||
|
* it starts at 0 mod 4096, hence the buf->area pointer's 12 LSB point
|
||||||
|
* to the relative offset of the storage area. As there will always be
|
||||||
|
* users using the wrong version of the tool with a dump, we need to
|
||||||
|
* run a few checks first. After that we'll create our own buffer
|
||||||
|
* descriptor matching that area.
|
||||||
|
*/
|
||||||
|
if ((((long)ring->buf.area) & 4095) != sizeof(*ring)) {
|
||||||
|
if (!force) {
|
||||||
|
fprintf(stderr, "FATAL: header in file is %ld bytes long vs %ld expected!\n",
|
||||||
|
(((long)ring->buf.area) & 4095),
|
||||||
|
(long)sizeof(*ring));
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
fprintf(stderr, "WARNING: header in file is %ld bytes long vs %ld expected!\n",
|
||||||
|
(((long)ring->buf.area) & 4095),
|
||||||
|
(long)sizeof(*ring));
|
||||||
|
}
|
||||||
|
/* maybe we could emit a warning at least ? */
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Now make our own buffer pointing to that area */
|
||||||
|
buf = b_make(((void *)ring + (((long)ring->buf.area) & 4095)),
|
||||||
|
ring->buf.size, ring->buf.head, ring->buf.data);
|
||||||
|
|
||||||
|
/* explanation for the initialization below: it would be better to do
|
||||||
|
* this in the parsing function but this would occasionally result in
|
||||||
|
* dropped events because we'd take a reference on the oldest message
|
||||||
|
* and keep it while being scheduled. Thus instead let's take it the
|
||||||
|
* first time we enter here so that we have a chance to pass many
|
||||||
|
* existing messages before grabbing a reference to a location. This
|
||||||
|
* value cannot be produced after initialization.
|
||||||
|
*/
|
||||||
|
if (unlikely(ofs == ~0)) {
|
||||||
|
ofs = 0;
|
||||||
|
|
||||||
|
/* going to the end means looking at tail-1 */
|
||||||
|
ofs = (flags & RING_WF_SEEK_NEW) ? buf.data - 1 : 0;
|
||||||
|
|
||||||
|
//HA_ATOMIC_INC(b_peek(&buf, ofs));
|
||||||
|
}
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
|
//HA_RWLOCK_RDLOCK(RING_LOCK, &ring->lock);
|
||||||
|
|
||||||
if (ofs >= buf.size) {
|
if (ofs >= buf.size) {
|
||||||
fprintf(stderr, "FATAL error at %d\n", __LINE__);
|
fprintf(stderr, "FATAL error at %d\n", __LINE__);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
//HA_ATOMIC_DEC(b_peek(&buf, ofs));
|
||||||
|
|
||||||
/* in this loop, ofs always points to the counter byte that precedes
|
/* in this loop, ofs always points to the counter byte that precedes
|
||||||
* the message so that we can take our reference there if we have to
|
* the message so that we can take our reference there if we have to
|
||||||
@ -166,6 +198,9 @@ int dump_ring_as_buf(struct buffer buf, size_t ofs, int flags)
|
|||||||
ofs += cnt + msg_len;
|
ofs += cnt + msg_len;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//HA_ATOMIC_INC(b_peek(&buf, ofs));
|
||||||
|
//HA_RWLOCK_RDUNLOCK(RING_LOCK, &ring->lock);
|
||||||
|
|
||||||
if (!(flags & RING_WF_WAIT_MODE))
|
if (!(flags & RING_WF_WAIT_MODE))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
@ -175,84 +210,9 @@ int dump_ring_as_buf(struct buffer buf, size_t ofs, int flags)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This function dumps all events from the ring <ring> from offset <ofs> and
|
|
||||||
* with flags <flags>.
|
|
||||||
*/
|
|
||||||
int dump_ring_v1(struct ring_v1 *ring, size_t ofs, int flags)
|
|
||||||
{
|
|
||||||
struct buffer buf;
|
|
||||||
|
|
||||||
/* Explanation: the storage area in the writing process starts after
|
|
||||||
* the end of the structure. Since the whole area is mmapped(), we know
|
|
||||||
* it starts at 0 mod 4096, hence the buf->area pointer's 12 LSB point
|
|
||||||
* to the relative offset of the storage area. As there will always be
|
|
||||||
* users using the wrong version of the tool with a dump, we need to
|
|
||||||
* run a few checks first. After that we'll create our own buffer
|
|
||||||
* descriptor matching that area.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Now make our own buffer pointing to that area */
|
|
||||||
buf = b_make(((void *)ring + (((long)ring->buf.area) & 4095)),
|
|
||||||
ring->buf.size, ring->buf.head, ring->buf.data);
|
|
||||||
|
|
||||||
return dump_ring_as_buf(buf, ofs, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* This function dumps all events from the ring <ring> from offset <ofs> and
|
|
||||||
* with flags <flags>.
|
|
||||||
*/
|
|
||||||
int dump_ring_v2(struct ring_v2 *ring, size_t ofs, int flags)
|
|
||||||
{
|
|
||||||
size_t size, head, tail, data;
|
|
||||||
struct buffer buf;
|
|
||||||
|
|
||||||
/* In ring v2 format, we have in this order:
|
|
||||||
* - size
|
|
||||||
* - hdr len (reserved bytes)
|
|
||||||
* - tail
|
|
||||||
* - head
|
|
||||||
* We can rebuild an equivalent buffer from these info for the function
|
|
||||||
* to dump.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Now make our own buffer pointing to that area */
|
|
||||||
size = ring->size;
|
|
||||||
head = ring->head;
|
|
||||||
tail = ring->tail & ~RING_TAIL_LOCK;
|
|
||||||
data = (head <= tail ? 0 : size) + tail - head;
|
|
||||||
buf = b_make((void *)ring + ring->rsvd, size, head, data);
|
|
||||||
return dump_ring_as_buf(buf, ofs, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* This function dumps all events from the ring <ring> from offset <ofs> and
 * with flags <flags>.
 */
int dump_ring_v2a(struct ring_v2a *ring, size_t ofs, int flags)
{
        size_t size, head, tail, data;
        struct buffer buf;

        /* In ring v2 format, we have in this order:
         *  - size
         *  - hdr len (reserved bytes)
         *  - tail
         *  - head
         * We can rebuild an equivalent buffer from these info for the function
         * to dump.
         */

        /* Now make our own buffer pointing to that area */
        size = ring->size;
        head = ring->head;
        tail = ring->tail & ~RING_TAIL_LOCK;
        data = (head <= tail ? 0 : size) + tail - head;
        buf = b_make((void *)ring + ring->rsvd, size, head, data);
        return dump_ring_as_buf(buf, ofs, flags);
}

int main(int argc, char **argv)
{
        void *ring;
        struct ring *ring;
        struct stat statbuf;
        const char *arg0;
        int fd;
@ -294,15 +254,7 @@ int main(int argc, char **argv)
                return 1;
        }

        if (((struct ring_v2 *)ring)->rsvd < 4096 && // not a pointer (v1), must be ringv2's rsvd
        return dump_ring(ring, ~0, 0);
            ((struct ring_v2 *)ring)->rsvd + ((struct ring_v2 *)ring)->size == statbuf.st_size) {
                if (((struct ring_v2 *)ring)->rsvd < 192)
                        return dump_ring_v2(ring, 0, 0);
                else
                        return dump_ring_v2a(ring, 0, 0); // thread-aligned version
        }
        else
                return dump_ring_v1(ring, 0, 0);
}

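/* Editor's note on the version check above (my reading of the code, not an
 * authoritative format spec): at this offset a v1 dump stores buf.area, a
 * pointer taken from the writer's address space and thus always well above
 * 4095, while a v2 dump stores the small "rsvd" header length. A small
 * value that also satisfies rsvd + size == file size is therefore taken as
 * v2, and rsvd values of 192 or more are assumed to denote the
 * thread-aligned (v2a) layout, per the inline comment.
 */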
@ -1,31 +0,0 @@
include ../../include/make/verbose.mk

CC       = cc
OPTIMIZE = -O2 -g
DEFINE   =
INCLUDE  =
OBJS     = ncpu.so ncpu
OBJDUMP  = objdump

all: $(OBJS)

%.o: %.c
        $(cmd_CC) $(OPTIMIZE) $(DEFINE) $(INCLUDE) -shared -fPIC -c -o $@ $^

%.so: %.o
        $(cmd_CC) -pie -o $@ $^
        $(Q)rm -f $^

%: %.so
        $(call qinfo, PATCHING)set -- $$($(OBJDUMP) -j .dynamic -h $^ | fgrep .dynamic); \
        ofs=$$6; size=$$3; \
        dd status=none bs=1 count=$$((0x$$ofs)) if=$^ of=$^-p1; \
        dd status=none bs=1 skip=$$((0x$$ofs)) count=$$((0x$$size)) if=$^ of=$^-p2; \
        dd status=none bs=1 skip=$$((0x$$ofs+0x$$size)) if=$^ of=$^-p3; \
        sed -e 's,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x08,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x00,g' < $^-p2 > $^-p2-patched; \
        cat $^-p1 $^-p2-patched $^-p3 > "$@"
        $(Q)rm -f $^-p*
        $(Q)chmod 755 "$@"

clean:
        rm -f $(OBJS) *.[oas] *.so-* *~
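# Editor's note on the "%: %.so" rule above (my reading, not authoritative):
# objdump locates the .dynamic section, dd splits the file around it, and the
# sed expression rewrites a 12-byte pattern that looks like the DT_FLAGS_1
# entry (tag 0x6ffffffb) with what appears to be the DF_1_PIE bit set,
# zeroing that bit. The patched binary then remains executable while the
# dynamic linker no longer rejects it for LD_PRELOAD, which is presumably why
# the same object can serve both as "ncpu" and "ncpu.so".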
136 dev/ncpu/ncpu.c
@ -1,136 +0,0 @@
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// gcc -fPIC -shared -O2 -o ncpu{.so,.c}
// NCPU=16 LD_PRELOAD=$PWD/ncpu.so command args...

static char prog_full_path[PATH_MAX];

long sysconf(int name)
{
        if (name == _SC_NPROCESSORS_ONLN ||
            name == _SC_NPROCESSORS_CONF) {
                const char *ncpu = getenv("NCPU");
                int n;

                n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
                if (n < 0 || n > CPU_SETSIZE)
                        n = CPU_SETSIZE;
                return n;
        }
        errno = EINVAL;
        return -1;
}

/* return a cpu_set having the first $NCPU set */
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask)
{
        const char *ncpu;
        int i, n;

        CPU_ZERO_S(cpusetsize, mask);

        ncpu = getenv("NCPU");
        n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
        if (n < 0 || n > CPU_SETSIZE)
                n = CPU_SETSIZE;

        for (i = 0; i < n; i++)
                CPU_SET_S(i, cpusetsize, mask);

        return 0;
}

/* silently ignore the operation */
int sched_setaffinity(pid_t pid, size_t cpusetsize, const cpu_set_t *mask)
{
        return 0;
}

void usage(const char *argv0)
{
        fprintf(stderr,
                "Usage: %s [-n ncpu] [cmd [args...]]\n"
                "  Will install itself in LD_PRELOAD before calling <cmd> with args.\n"
                "  The number of CPUs may also come from variable NCPU or default to %d.\n"
                "\n"
                "",
                argv0, CPU_SETSIZE);
        exit(1);
}

/* Called in wrapper mode, no longer supported on recent glibc */
int main(int argc, char **argv)
{
        const char *argv0 = argv[0];
        char *preload;
        int plen;

        prog_full_path[0] = 0;
        plen = readlink("/proc/self/exe", prog_full_path, sizeof(prog_full_path) - 1);
        if (plen != -1)
                prog_full_path[plen] = 0;
        else
                plen = snprintf(prog_full_path, sizeof(prog_full_path), "%s", argv[0]);

        while (1) {
                argc--;
                argv++;

                if (argc < 1)
                        usage(argv0);

                if (strcmp(argv[0], "--") == 0) {
                        argc--;
                        argv++;
                        break;
                }
                else if (strcmp(argv[0], "-n") == 0) {
                        if (argc < 2)
                                usage(argv0);

                        if (setenv("NCPU", argv[1], 1) != 0)
                                usage(argv0);
                        argc--;
                        argv++;
                }
                else {
                        /* unknown arg, that's the command */
                        break;
                }
        }

        /* here the only args left start with the cmd name */

        /* now we'll concatenate ourselves at the end of the LD_PRELOAD variable */
        preload = getenv("LD_PRELOAD");
        if (preload) {
                int olen = strlen(preload);
                preload = realloc(preload, olen + 1 + plen + 1);
                if (!preload) {
                        perror("realloc");
                        exit(2);
                }
                preload[olen] = ' ';
                memcpy(preload + olen + 1, prog_full_path, plen);
                preload[olen + 1 + plen] = 0;
        }
        else {
                preload = prog_full_path;
        }

        if (setenv("LD_PRELOAD", preload, 1) < 0) {
                perror("setenv");
                exit(2);
        }

        execvp(*argv, argv);
        perror("execve");
        exit(2);
}
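/* Editor's usage sketch, derived from the two comment lines near the top of
 * this file (the haproxy command line is a hypothetical example):
 *
 *   gcc -fPIC -shared -O2 -o ncpu.so ncpu.c
 *   NCPU=4 LD_PRELOAD=$PWD/ncpu.so ./haproxy -f test.cfg
 *
 * The preloaded object interposes sysconf(), sched_getaffinity() and
 * sched_setaffinity(), so the wrapped process believes it runs on exactly
 * $NCPU CPUs; handy for testing CPU binding logic on a small machine.
 */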
@ -1,395 +0,0 @@
Patchbot: AI bot making use of Natural Language Processing to suggest backports
=============================================================== 2023-12-18 ====


Background
----------

Selecting patches to backport from the development branch is a tedious task, in
part due to the abundance of patches and the fact that many bug fixes are for
that same version and not for backporting. The more it gets delayed, the harder
it becomes, and the harder it is to start, the less likely it is to get
started. The urban legend according to which one "just" has to do that
periodically doesn't work, because certain patches need to be left hanging for
a while under observation, others need to be merged urgently, and for some, the
person in charge of the backport might simply need an opinion from the patch's
author or the affected subsystem maintainer, and this cannot make the whole
backport process stall.

The information needed to figure out whether a patch needs to be backported is
present in the commit message, with varying nuances such as "may", "may not",
"should", "probably", "shouldn't unless", "keep under observation" etc. One
particularity specific to backports is that the opinion on a patch may change
over time, either because it was later found to be wrong or insufficient, or
because the former analysis mistakenly suggested to backport or not to.

This means that the person in charge of the backports has to read the whole
commit message for each patch to figure out the backporting instructions, and
this takes a while.

Several attempts were made over the years to try to partially automate this
task, including the cherry-pick mode of the "git-show-backports" utility that
eases navigation back-and-forth between commits.

Lately, a lot of progress was made in the domain of Natural Language
Understanding (NLU) and more generally Natural Language Processing (NLP).
Between the first attempts in early 2023, involving successive layers of the
Roberta model called from totally unreliable Python code, and December 2023,
the situation evolved from promising but unusable to mostly autonomous.

For those interested in history, the first attempts in early 2023 involved
successive layers of the Roberta model, but these relied on totally unreliable
Python code that broke all the time and could barely be transferred to another
machine without upgrading or downgrading the installed modules, and it used
huge amounts of resources for a somewhat disappointing result: the verdicts
were correct roughly 60-70% of the time, and it was not possible to get hints
such as "wait" nor even "uncertain". It could just be qualified as promising.
Another big limitation was the 256-token limit, forcing the script to select
only the last few lines of the commit message to take the decision.

Roughly at the same time, in March 2023, Meta issued their much larger LLaMa
model, and Georgi Gerganov released "llama.cpp", an open-source C++ engine
that loads and runs such large models without all the usual problems inherent
to the Python ecosystem. New attempts were made with LLaMa and it was already
much better than Roberta, but the output was difficult to parse, and it had to
be combined with the final decision layer of Roberta. Then new variants of
LLaMa appeared, such as Alpaca, which follows instructions but tends to forget
them if given before the patch; then Vicuna, which was pretty reliable but
very slow at 33B size and difficult to tune; then Airoboros, which was the
first one to give very satisfying results in a reasonable time, following
instructions reasonably closely with a stable output, but with sometimes
surprising analysis and contradictions. It was already about 90% reliable and
considered a time saver in 13B size. Other models were later tried as they
appeared, such as OpenChat-3.5, Juna, OpenInstruct, Orca-2, Mistral-0.1 and
its variants Neural and OpenHermes-2.5. Mistral showed an unrivaled
understanding despite being smaller and much faster than the other ones, but
was a bit freewheeling regarding instructions. Dolphin-2.1 rebased on top of
it gave extremely satisfying results, with less variation in the output
format, but the script still had difficulties catching its conclusion from
time to time, though it was pretty much readable for the human in charge of
the task. And finally, just before releasing, Mistral-0.2 came out and
addressed all issues, with a human-like understanding, perfect obedience to
instructions, and an extremely stable output format that is easy to parse
from simple scripts. The decisions now match the human's in close to 100% of
the patches, unless the human is aware of extra context, of course.


Architecture
------------

The current solution relies on the llama.cpp engine, which is a simple, fast,
reliable and portable engine to load models and run inference, and on the
Mistral-0.2 LLM.

A collection of patches is built from the development branch since the -dev0
tag, and for each of them the engine is called to evaluate the developer's
intent based on the commit message. A detailed context explaining the haproxy
maintenance model and what the user wants is passed, then the LLM is invited
to provide its opinion on the need for a backport and an explanation of the
reason for its choice. This often helps the user find a quick summary about
the patch. All these outputs are then converted to a long HTML page with
colors and radio buttons, where patches are pre-selected based on this
classification. The user can consult and adjust the page, read the commits if
needed, and the selected patches finally yield copy-pastable commands in a
text area with the commit IDs to work on, typically in a form suitable for a
simple "git cherry-pick -sx".

The scripts are designed to run on a headless machine, called from a crontab,
with the output served from a static HTTP server.

The code is currently found in Georgi Gerganov's repository:

    https://github.com/ggerganov/llama.cpp

Tag b1505 is known to work fine, and uses the GGUF file format.

The model(s) can be found in Hugging Face user "TheBloke"'s collection of
models:

    https://huggingface.co/TheBloke

Model Mistral-7B-Instruct-v0.2-GGUF quantized at Q5K_M is known to work well
with the llama.cpp version above.


Deployment
----------

Note: it is a good idea to start downloading the model(s) in the background,
as such files are typically 5 GB or more and can take some time to download
depending on the internet bandwidth.

It seems reasonable to create a dedicated user to periodically run this task.
Let's call it "patchbot". Developers should be able to easily run a shell from
this user to perform some maintenance or testing (e.g. "sudo").

All paths are specified in the example "update-3.0.sh" script, and assume a
deployment in the user's home, so this is what is being described here. The
proposed deployment layout is the following:

  $HOME (e.g. /home/patchbot)
    |
    +- data
    |    |
    |    +-- models       # GGUF files from TheBloke's collection
    |    |
    |    +-- prompts      # prompt*-pfx*, prompt*-sfx*, cache
    |    |
    |    +-- in
    |    |    |
    |    |    +-- haproxy      # haproxy Git repo
    |    |    |
    |    |    +-- patches-3.0  # patches from development branch 3.0
    |    |
    |    +-- out          # report directory (HTML)
    |
    +- prog
         |
         +-- bin          # program(s)
         |
         +-- scripts      # processing scripts
         |
         +-- llama.cpp    # llama Git repository

- Let's first create the structure:

    mkdir -p ~/data/{in,models,prompts} ~/prog/{bin,scripts}

- data/in/haproxy must contain a clone of the haproxy development tree that
  will periodically be pulled from:

    cd ~/data/in
    git clone https://github.com/haproxy/haproxy
    cd ~

- The prompt files are a copy of haproxy's "dev/patchbot/prompt/"
  subdirectory. The prompt files are per-version because they contain
  references to the haproxy development version number. For each prompt,
  there is a prefix ("-pfx") that is loaded before the patch, and a suffix
  ("-sfx") that specifies the user's expectations after reading the patch.
  For best efficiency it's useful to place most of the explanation in the
  prefix and as little as possible in the suffix, because the prefix is
  cacheable. Different models use different instruction formats and different
  explanations, so it's fine to keep a collection of prompts and use only
  one. Several instruction formats are in common use, "llama-2", "alpaca",
  "vicuna" and "chatml" being the usual ones. When experimenting with a new
  model, just copy-paste the closest one and tune it for best results. Since
  we already cloned haproxy above, we'll take the files from there:

    cp ~/data/in/haproxy/dev/patchbot/prompt/*txt ~/data/prompts/

  Upon first run, a cache file will be produced in this directory by parsing
  an empty file and saving the current model's context. The cache file will
  automatically be deleted and rebuilt if it is absent or older than the
  prefix or suffix file. The cache files are specific to a model, so when
  experimenting with other models, be sure not to reuse the same cache file;
  in doubt, just delete them. Rebuilding the cache file typically takes
  around 2 minutes of processing on an 8-core machine.

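  As an editor's sketch of what a cached-prefix run may look like (the real
  invocation lives in submit-ai.sh/process-*.sh; the model file name and the
  exact values are assumptions, though "-m", "-f", "-t" and "--prompt-cache"
  are ordinary options of llama.cpp's "main"):

    cat ~/data/prompts/prompt-3.0-pfx.txt 0001-some-patch.txt \
        ~/data/prompts/prompt-3.0-sfx.txt > /tmp/full-prompt.txt
    ~/prog/bin/main -m ~/data/models/mistral-7b-instruct-v0.2.Q5_K_M.gguf \
        -f /tmp/full-prompt.txt --prompt-cache ~/data/prompts/cache.bin -t 8
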
- The model(s) from TheBloke's Hugging Face account have to be downloaded in
  GGUF file format, quantized at Q5K_M, and stored as-is into data/models/.

- data/in/patches-3.0/ is where the "mk-patch-list.sh" script will emit the
  patches corresponding to new commits in the development branch. Its suffix
  must match the name of the current development branch for patches to be
  found there. In addition, the classification of the patches will be emitted
  there next to the input patches, with the same name as the original file
  plus a suffix indicating what model/prompt combination was used.

    mkdir -p ~/data/in/patches-3.0

- data/out is where the final report will be emitted. If running on a
  headless machine, it is worth making sure that this directory is accessible
  from a static web server. Thus either create a directory and place a
  symlink or configuration somewhere in the web server's settings to
  reference this location, or make it a symlink to another place already
  exported by the web server, and make sure the user has the permissions to
  write there.

    mkdir -p ~/data/out

  On Ubuntu 20.04 it was found that the package "micro-httpd" works out of
  the box, serving /var/www/html and following symlinks. As such this is
  sufficient to expose the reports:

    sudo ln -s ~patchbot/data/out /var/www/html/patchbot

- prog/bin will contain the executable(s) needed to operate, namely "main"
  from llama.cpp:

    mkdir -p ~/prog/bin

- prog/llama.cpp is a clone of the "llama.cpp" GitHub repository. As of
  December 2023, the project has improved its forward compatibility and it's
  generally both safe and recommended to stay on the latest version, hence to
  just clone the master branch. In case of difficulties, tag b1505 was proven
  to work well with the aforementioned model. Building is done by default for
  the local platform, optimized for speed with the native CPU:

    mkdir -p ~/prog
    cd ~/prog
    git clone https://github.com/ggerganov/llama.cpp
    [ only in case of problems: cd llama.cpp && git checkout b1505 ]

    make -j$(nproc) main LLAMA_FAST=1
    cp main ~/prog/bin/
    cd ~

- prog/scripts needs the following scripts:
  - mk-patch-list.sh from haproxy's scripts/ subdirectory
  - submit-ai.sh, process-*.sh, post-ai.sh, update-*.sh

    cp ~/data/in/haproxy/scripts/mk-patch-list.sh ~/prog/scripts/
    cp ~/data/in/haproxy/dev/patchbot/scripts/*.sh ~/prog/scripts/

- Verify that the various paths in update-3.0.sh match your choices, or
  adjust them:

    vi ~/prog/scripts/update-3.0.sh

- The tool is memory-bound, so a machine with more memory channels and/or
  very fast memory will usually be faster than a higher CPU count with a
  lower memory bandwidth. In addition, the performance is not linear with the
  number of cores, and experimentation shows that efficiency drops above 8
  threads. For this reason the script integrates a "PARALLEL_RUNS" variable
  indicating how many instances to run in parallel, each on its own patch.
  This makes better use of the CPUs and memory bandwidth; a sketch of an
  equivalent dispatch follows below. Setting 2 instances for 8 cores / 16
  threads gives optimal results on dual-memory-channel systems.

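  Editor's sketch of an equivalent dispatch (hypothetical: the real logic is
  inside the update/process scripts, and submit-ai.sh's arguments are an
  assumption here):

    ls ~/data/in/patches-3.0/*.patch | \
        xargs -P "$PARALLEL_RUNS" -n 1 ~/prog/scripts/submit-ai.sh
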
From this point, executing this update script manually should work and produce
the result. Count around 0.5-2 minutes per patch on an 8-core machine, so it
can be reasonably fast during the early development stages (before -dev1) but
unbearably long later, where it can make more sense to run it at night. It
should not report any error and should only report the total execution time.

If interrupted (Ctrl-C, logout, out of memory etc.), check for incomplete .txt
files in ~/data/in/patches*/ that can result from this interruption, and
delete them because they will not be reproduced:

    ls -lart ~/data/in/patches-3.0/*.txt
    ls -lS ~/data/in/patches-3.0/*.txt

Once the output is produced, visit ~/data/out/ using a web browser and check
that the table loads correctly. Note that after a new release or a series of
backports, the table may appear empty; it's just because all known patches are
already backported and collapsed by default. Clicking on "All" at the top left
will unhide them.

Finally, when satisfied, place it in a crontab, for example, run every hour:

    crontab -e

    # m h dom mon dow   command
    # run every hour at minute 02
    2 * * * *           /home/patchbot/update-3.0.sh


Usage
-----

Using the HTML output is a bit rustic but efficient. The interface is split
into 5 columns, from left to right:

  - first column: patch number from 1 to N, just to ease navigation. Below
    the number appears a radio button which allows one to mark this patch as
    the start of the review. When clicked, all prior patches disappear and
    are no longer listed. This can be undone by clicking on the radio button
    under the "All" word in this column's header.

  - second column: commit ID (abbreviated "CID" in the header). It's an
    8-digit shortened representation of the commit ID. It's presented as a
    link which, if clicked, will directly show that commit from the haproxy
    public repository. Below the commit ID is the patch's author date in the
    condensed format "DD-MmmYY", e.g. "18-Dec23" for "18th December 2023". It
    was found that having a date indication sometimes helps differentiate
    certain related patches.

  - third column: "Subject", this is the subject of the patch, prefixed with
    the 4-digit number matching the file name in the directory (which helps,
    e.g., to remove or reprocess one if needed). This is also a link to the
    same commit in haproxy's public repository. At the lower right, under the
    subject, is the shortened e-mail address (only user@domain, keeping only
    the first part of the domain, e.g. "foo@haproxy"). Just like with the
    date, it helps figuring out what to expect after a recent discussion with
    a developer.

  - fourth column: "Verdict". This column contains 4 radio buttons
    prefiguring the choice for this patch between "N" for "No", represented
    in gray (this patch should not be backported, let's drop it), "U" for
    "Uncertain" in green (still unsure about it, most likely the author
    should be contacted), "W" for "Wait" in blue (this patch should be
    backported, but not immediately, only after it has spent some time in the
    development branch), and "Y" for "Yes" in red (this patch must be
    backported, let's pick it). The choice is preselected by the scripts
    above, and since these are radio buttons, the user is free to change this
    selection. Reloading will lose the user's choices. When changing a
    selection, the line's background changes to match a similar color tone,
    allowing one to visually spot preselected patches.

  - fifth column: reason for the choice. The scripts try to provide an
    explanation for the choice of the preselection, and try to always end
    with a conclusion among "yes", "no", "wait", "uncertain". The explanation
    usually fits in 2-4 lines, is faster to read than a whole commit message,
    and is very often pretty accurate. It's also been noticed that
    Mistral-v0.2 shows far fewer hallucinations than the others (it doesn't
    seem to invent information that was not part of its input), so seeing
    certain topics discussed there generally indicates that they were in the
    original commit message. The scripts try to emphasize the sensitive parts
    of the commit message such as risks, dependencies, referenced issues, the
    oldest version to backport to, etc. Elements that look like issue numbers
    and commit IDs are turned into links to ease navigation.

In addition, in order to improve readability, the top of the table shows 4
buttons allowing one to show/hide each category. For example, when trying to
focus only on "uncertain" and "wait", it can make sense to hide "N" and "Y"
and click "Y" or "N" on the displayed ones until there are none left.

In order to reduce the risk of missing a misqualified patch, those marked
"BUG" or "DOC" are displayed in bold even if tagged "No". This has been shown
to be sufficient to catch the eye when scrolling, encouraging one to re-visit
them.

More importantly, the script will also try to check which patches were
already backported to the previous stable version. Those that were backported
will have the first two columns colored gray, and by default, the review will
start from the first patch after the last backported one. This explains why,
just after a backport, the table may appear empty with only the footer "New"
checked.

Finally, at the bottom of the table is an editable, copy-pastable text area
that is redrawn at each click. It contains a series of 4 shell commands that
can be copy-pasted at once and that assign commit IDs to 4 variables, one per
category. Most often only "y" will be of interest, so for example if the
review process ends with:

    cid_y=( 7dab3e82 456ba6e9 75f5977f 917f7c74 )

then copy-pasting it in a terminal already in the haproxy-2.9 directory and
issuing:

    git cherry-pick -sx ${cid_y[@]}

will result in all these patches being backported to that version.
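Editor's note: the same pattern presumably works for the three other
variables emitted by the text area, one per category (the variable names
below are an assumption following the category letters), e.g. to pick the
"wait" patches once they have matured:

    git cherry-pick -sx ${cid_w[@]}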

Criticisms
----------

The interface is absolutely ugly but gets the job done. Proposals to revamp it
are welcome, provided that they do not alter usability and portability (e.g.
the ability to open the locally produced file without requiring access to an
external server).


Thanks
------

This utility is proof that boringly repetitive tasks that can be offloaded
from humans can save their time for more productive things. This work, which
started with extremely limited tools, was made possible thanks to Meta, for
opening their models after the leak; Georgi Gerganov and the community that
developed around llama.cpp, for creating the first really open engine that
builds out of the box and just works, contrary to the previously crippled
Python-only ecosystem; Tom Jobbins (aka TheBloke), for making it so easy to
discover new models every day by simply quantizing all of them and making
them available from a single location; MistralAI, for producing an
exceptionally good model that surpasses all others, is the first one to feel
as smart and accurate as a real human on such tasks, is fast, and totally
free; and of course, HAProxy Technologies, for investing some time on this
and for the available hardware that permits a lot of experimentation.
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists of one development branch and multiple
maintenance branches.

All the development is made in the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development
branch is created with a new, higher version. The current development branch
is 2.9-dev, and maintenance branches are 2.8 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down
to the branch that introduced the issue: 2.8 first, then 2.7, then 2.6 and so
on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regressions in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by
other patches that were not in some branches and that will need to be
backported as well for the fix to work. In this case, such information is
explicitly provided in the commit message by the patch's author in natural
language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch
is "needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an
expert on HAProxy and everything related to Git, patch management, and the
risks associated with backports, so he doesn't want to be told how to proceed
nor to review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job,
he needs an accurate summary of the information and instructions found in
each commit message. Specifically he needs to figure out if the patch fixes a
problem affecting an older branch or not, if it needs to be backported, if so
to which branches, and if other patches need to be backported along with it.

The indented text block below, after an "id" line and starting with a Subject
line, is a commit message from the HAProxy development branch that describes
a patch applied to that branch, starting with its subject line; please read
it carefully.

@ -1,68 +0,0 @@
### Instruction:

HAProxy's development cycle consists of one development branch and multiple
maintenance branches.

All the development is made in the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development
branch is created with a new, higher version. The current development branch
is 2.9-dev, and maintenance branches are 2.8 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down
to the branch that introduced the issue: 2.8 first, then 2.7, then 2.6 and so
on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regressions in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by
other patches that were not in some branches and that will need to be
backported as well for the fix to work. In this case, such information is
explicitly provided in the commit message by the patch's author in natural
language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch
is "needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an
expert on HAProxy and everything related to Git, patch management, and the
risks associated with backports, so he doesn't want to be told how to proceed
nor to review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job,
he needs an accurate summary of the information and instructions found in
each commit message. Specifically he needs to figure out if the patch fixes a
problem affecting an older branch or not, if it needs to be backported, if so
to which branches, and if other patches need to be backported along with it.

The indented text block below, after an "id" line and starting with a Subject
line, is a commit message from the HAProxy development branch that describes
a patch applied to that branch, starting with its subject line; please read
it carefully.

### Input:
@ -1,28 +0,0 @@

### Instruction:

You are an AI assistant that follows instructions extremely well. Help as
much as you can, responding to a single question with a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies
might be mentioned in the commit message. Carefully study the commit message
and its backporting instructions if any (otherwise it should probably not be
backported), then provide a very concise and short summary that will help the
developer decide to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one
and why. Finally, based on your analysis, give your general conclusion as
"Conclusion: X" where X is a single word among:
  - "yes", if you recommend to backport the patch right now, either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (2.8 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time;
  - "no", if nothing clearly indicates a necessity to backport this patch
    (e.g. lack of explicit backport instructions, or it's just an
    improvement);
  - "uncertain" otherwise, for cases not covered above.

### Response:

Explanation:
@ -1,67 +0,0 @@
<|im_start|>system
HAProxy's development cycle consists of one development branch and multiple
maintenance branches.

All the development is made in the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development
branch is created with a new, higher version. The current development branch
is 2.9-dev, and maintenance branches are 2.8 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down
to the branch that introduced the issue: 2.8 first, then 2.7, then 2.6 and so
on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regressions in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by
other patches that were not in some branches and that will need to be
backported as well for the fix to work. In this case, such information is
explicitly provided in the commit message by the patch's author in natural
language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch
is "needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an
expert on HAProxy and everything related to Git, patch management, and the
risks associated with backports, so he doesn't want to be told how to proceed
nor to review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job,
he needs an accurate summary of the information and instructions found in
each commit message. Specifically he needs to figure out if the patch fixes a
problem affecting an older branch or not, if it needs to be backported, if so
to which branches, and if other patches need to be backported along with it.

The indented text block below, after an "id" line and starting with a Subject
line, is a commit message from the HAProxy development branch that describes
a patch applied to that branch, starting with its subject line; please read
it carefully.
<|im_end|>
<|im_start|>user
@ -1,28 +0,0 @@
<|im_end|>
<|im_start|>system

You are an AI assistant that follows instructions extremely well. Help as
much as you can, responding to a single question with a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies
might be mentioned in the commit message. Carefully study the commit message
and its backporting instructions if any (otherwise it should probably not be
backported), then provide a very concise and short summary that will help the
developer decide to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one
and why. Finally, based on your analysis, give your general conclusion as
"Conclusion: X" where X is a single word among:
  - "yes", if you recommend to backport the patch right now, either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (2.8 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time;
  - "no", if nothing clearly indicates a necessity to backport this patch
    (e.g. lack of explicit backport instructions, or it's just an
    improvement);
  - "uncertain" otherwise, for cases not covered above.
<|im_end|>
<|im_start|>assistant

Explanation:
@ -1,29 +0,0 @@

ENDINPUT
BEGININSTRUCTION

You are an AI assistant that follows instructions extremely well. Help as
much as you can, responding to a single question with a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies
might be mentioned in the commit message. Carefully study the commit message
and its backporting instructions if any (otherwise it should probably not be
backported), then provide a very concise and short summary that will help the
developer decide to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one
and why. Finally, based on your analysis, give your general conclusion as
"Conclusion: X" where X is a single word among:
  - "yes", if you recommend to backport the patch right now, either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (2.8 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time;
  - "no", if nothing clearly indicates a necessity to backport this patch
    (e.g. lack of explicit backport instructions, or it's just an
    improvement);
  - "uncertain" otherwise, for cases not covered above.

ENDINSTRUCTION

Explanation:
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists of one development branch and multiple
maintenance branches.

All the development is made in the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development
branch is created with a new, higher version. The current development branch
is 3.1-dev, and maintenance branches are 3.0 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down
to the branch that introduced the issue: 3.0 first, then 2.9, then 2.8 and so
on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regressions in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by
other patches that were not in some branches and that will need to be
backported as well for the fix to work. In this case, such information is
explicitly provided in the commit message by the patch's author in natural
language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch
is "needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an
expert on HAProxy and everything related to Git, patch management, and the
risks associated with backports, so he doesn't want to be told how to proceed
nor to review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job,
he needs an accurate summary of the information and instructions found in
each commit message. Specifically he needs to figure out if the patch fixes a
problem affecting an older branch or not, if it needs to be backported, if so
to which branches, and if other patches need to be backported along with it.

The indented text block below, after an "id" line and starting with a Subject
line, is a commit message from the HAProxy development branch that describes
a patch applied to that branch, starting with its subject line; please read
it carefully.

@ -1,29 +0,0 @@

ENDINPUT
BEGININSTRUCTION

You are an AI assistant that follows instructions extremely well. Help as
much as you can, responding to a single question with a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies
might be mentioned in the commit message. Carefully study the commit message
and its backporting instructions if any (otherwise it should probably not be
backported), then provide a very concise and short summary that will help the
developer decide to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one
and why. Finally, based on your analysis, give your general conclusion as
"Conclusion: X" where X is a single word among:
  - "yes", if you recommend to backport the patch right now, either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (3.0 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time;
  - "no", if nothing clearly indicates a necessity to backport this patch
    (e.g. lack of explicit backport instructions, or it's just an
    improvement);
  - "uncertain" otherwise, for cases not covered above.

ENDINSTRUCTION

Explanation:
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists of one development branch and multiple
maintenance branches.

All the development is made in the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development
branch is created with a new, higher version. The current development branch
is 3.2-dev, and maintenance branches are 3.1 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down
to the branch that introduced the issue: 3.1 first, then 3.0, then 2.9, then
2.8 and so on. This operation is called "backporting". A fix for an issue is
never backported beyond the branch that introduced the issue. An important
point is that the project maintainers really aim at zero regressions in
maintenance branches, so they're never willing to take any risk backporting
patches that are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by
other patches that were not in some branches and that will need to be
backported as well for the fix to work. In this case, such information is
explicitly provided in the commit message by the patch's author in natural
language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch
is "needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an
expert on HAProxy and everything related to Git, patch management, and the
risks associated with backports, so he doesn't want to be told how to proceed
nor to review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job,
he needs an accurate summary of the information and instructions found in
each commit message. Specifically he needs to figure out if the patch fixes a
problem affecting an older branch or not, if it needs to be backported, if so
to which branches, and if other patches need to be backported along with it.

The indented text block below, after an "id" line and starting with a Subject
line, is a commit message from the HAProxy development branch that describes
a patch applied to that branch, starting with its subject line; please read
it carefully.

@ -1,29 +0,0 @@
|
|||||||
|
|
||||||
ENDINPUT
|
|
||||||
BEGININSTRUCTION
|
|
||||||
|
|
||||||
You are an AI assistant that follows instruction extremely well. Help as much
|
|
||||||
as you can, responding to a single question using a single response.
|
|
||||||
|
|
||||||
The developer wants to know if he needs to backport the patch above to fix
|
|
||||||
maintenance branches, for which branches, and what possible dependencies might
|
|
||||||
be mentioned in the commit message. Carefully study the commit message and its
|
|
||||||
backporting instructions if any (otherwise it should probably not be backported),
|
|
||||||
then provide a very concise and short summary that will help the developer decide
|
|
||||||
to backport it, or simply to skip it.
|
|
||||||
|
|
||||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
|
||||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
|
||||||
where X is a single word among:
|
|
||||||
- "yes", if you recommend to backport the patch right now either because
|
|
||||||
it explicitly states this or because it's a fix for a bug that affects
|
|
||||||
a maintenance branch (3.1 or lower);
|
|
||||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
|
||||||
only after waiting some time.
|
|
||||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
|
||||||
lack of explicit backport instructions, or it's just an improvement);
|
|
||||||
- "uncertain" otherwise for cases not covered above
|
|
||||||
|
|
||||||
ENDINSTRUCTION
|
|
||||||
|
|
||||||
Explanation:
|
|
||||||
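(Editor's illustration, not part of the diff: the descending backport order
described in the context above would, for a fix that first appeared in 2.9,
amount to something like the loop below. The branch naming and single-repo
layout are assumptions; each stable version actually lives in its own tree.)

    for b in 3.1 3.0 2.9; do
        git checkout "haproxy-$b" && git cherry-pick -x "$cid"
    done
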
@@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists of one development branch and multiple
maintenance branches.

All development is done in the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.3-dev, and maintenance branches are 3.2 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down
to the branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regressions in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
interchangeably talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by other
patches that are not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests it.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically, he needs to figure out if the patch fixes a
problem affecting an older branch or not, if it needs to be backported, if so
to which branches, and if other patches need to be backported along with it.

The indented text block below, after an "id" line and starting with a Subject
line, is a commit message from the HAProxy development branch that describes a
patch applied to that branch, starting with its subject line; please read it
carefully.

@@ -1,29 +0,0 @@

ENDINPUT
BEGININSTRUCTION

You are an AI assistant that follows instructions extremely well. Help as much
as you can, responding to a single question using a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
  - "yes", if you recommend to backport the patch right now either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (3.2 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time;
  - "no", if nothing clearly indicates a necessity to backport this patch (e.g.
    lack of explicit backport instructions, or it's just an improvement);
  - "uncertain" otherwise, for cases not covered above.

ENDINSTRUCTION

Explanation:
@@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists of one development branch and multiple
maintenance branches.

All development is done in the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.4-dev, and maintenance branches are 3.3 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down
to the branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regressions in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
interchangeably talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by other
patches that are not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests it.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically, he needs to figure out if the patch fixes a
problem affecting an older branch or not, if it needs to be backported, if so
to which branches, and if other patches need to be backported along with it.

The indented text block below, after an "id" line and starting with a Subject
line, is a commit message from the HAProxy development branch that describes a
patch applied to that branch, starting with its subject line; please read it
carefully.

@@ -1,29 +0,0 @@

ENDINPUT
BEGININSTRUCTION

You are an AI assistant that follows instructions extremely well. Help as much
as you can, responding to a single question using a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
  - "yes", if you recommend to backport the patch right now either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (3.3 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time;
  - "no", if nothing clearly indicates a necessity to backport this patch (e.g.
    lack of explicit backport instructions, or it's just an improvement);
  - "uncertain" otherwise, for cases not covered above.

ENDINSTRUCTION

Explanation:
@@ -1,417 +0,0 @@
#!/bin/bash

####
#### Todo:
####   - change line color based on the selected radio button
####   - support collapsing lines per color/category (show/hide for each)
####   - add category "next" and see if the prompt can handle that (eg: d3e379b3)
####   - produce multiple lists on output (per category) allowing to save batches
####

die() {
        [ "$#" -eq 0 ] || echo "$*" >&2
        exit 1
}

err() {
        echo "$*" >&2
}

quit() {
        [ "$#" -eq 0 ] || echo "$*"
        exit 0
}

#### Main

USAGE="Usage: ${0##*/} [ -h ] [ -b 'bkp_list' ] patch..."
MYSELF="$0"
GITURL="http://git.haproxy.org/?p=haproxy.git;a=commitdiff;h="
ISSUES="https://github.com/haproxy/haproxy/issues/"
BKP=""

while [ -n "$1" -a -z "${1##-*}" ]; do
        case "$1" in
                -h|--help) quit "$USAGE" ;;
                -b)        BKP="$2"; shift 2 ;;
                *)         die "$USAGE" ;;
        esac
done

PATCHES=( "$@" )

if [ ${#PATCHES[@]} = 0 ]; then
        die "$USAGE"
fi

# BKP is a space-delimited list of 8-char commit IDs that we'll
# assign to the local bkp[] associative array.

declare -A bkp

for cid in $BKP; do
        bkp[$cid]=1
done
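
# Editor's sketch (not in the original script): a typical invocation could
# look like the line below, assuming one report file per patch, each starting
# with an "#id: ..." line and containing the model's "Explanation: ...
# Conclusion: X" answer. File names here are hypothetical:
#
#   ./review.sh -b "1a2b3c4d 5e6f7a8b" reports/0*.txt > review.html
#
# The -b list marks commits already backported so they get the $BG_B
# background and the review cursor starts after them.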

# some colors
BG_B="#e0e0e0"
BT_N="gray";    BG_N="white"
BT_U="#00e000"; BG_U="#e0ffe0"
BT_W="#0060ff"; BG_W="#e0e0ff"
BT_Y="red";     BG_Y="#ffe0e0"

echo "<HTML>"

cat <<- EOF
<HEAD><style>
input.n[type="radio"] {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 3px solid $BT_N;
    background-color: transparent;
}
input.n[type="radio"]:checked {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 2px solid black;
    background-color: $BT_N;
}

input.u[type="radio"] {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 3px solid $BT_U;
    background-color: transparent;
}
input.u[type="radio"]:checked {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 2px solid black;
    background-color: $BT_U;
}

input.w[type="radio"] {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 3px solid $BT_W;
    background-color: transparent;
}
input.w[type="radio"]:checked {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 2px solid black;
    background-color: $BT_W;
}

input.y[type="radio"] {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 3px solid $BT_Y;
    background-color: transparent;
}
input.y[type="radio"]:checked {
    appearance: none;
    width: 1.25em;
    height: 1.25em;
    border-radius: 50%;
    border: 2px solid black;
    background-color: $BT_Y;
}
</style>

<script type="text/javascript"><!--

var nb_patches = 0;
var cid = [];
var bkp = [];

// first line to review
var review = 0;

// show/hide table lines and update their color
function updt_table(line) {
    var b = document.getElementById("sh_b").checked;
    var n = document.getElementById("sh_n").checked;
    var u = document.getElementById("sh_u").checked;
    var w = document.getElementById("sh_w").checked;
    var y = document.getElementById("sh_y").checked;
    var tn = 0, tu = 0, tw = 0, ty = 0;
    var bn = 0, bu = 0, bw = 0, by = 0;
    var i, el;

    for (i = 1; i < nb_patches; i++) {
        if (document.getElementById("bt_" + i + "_n").checked) {
            tn++;
            if (bkp[i])
                bn++;
            if (line && i != line)
                continue;
            el = document.getElementById("tr_" + i);
            el.style.backgroundColor = "$BG_N";
            el.style.display = n && (b || !bkp[i]) && i >= review ? "" : "none";
        }
        else if (document.getElementById("bt_" + i + "_u").checked) {
            tu++;
            if (bkp[i])
                bu++;
            if (line && i != line)
                continue;
            el = document.getElementById("tr_" + i);
            el.style.backgroundColor = "$BG_U";
            el.style.display = u && (b || !bkp[i]) && i >= review ? "" : "none";
        }
        else if (document.getElementById("bt_" + i + "_w").checked) {
            tw++;
            if (bkp[i])
                bw++;
            if (line && i != line)
                continue;
            el = document.getElementById("tr_" + i);
            el.style.backgroundColor = "$BG_W";
            el.style.display = w && (b || !bkp[i]) && i >= review ? "" : "none";
        }
        else if (document.getElementById("bt_" + i + "_y").checked) {
            ty++;
            if (bkp[i])
                by++;
            if (line && i != line)
                continue;
            el = document.getElementById("tr_" + i);
            el.style.backgroundColor = "$BG_Y";
            el.style.display = y && (b || !bkp[i]) && i >= review ? "" : "none";
        }
        else {
            // bug
            if (line && i != line)
                continue;
            el = document.getElementById("tr_" + i);
            el.style.backgroundColor = "red";
            el.style.display = "";
        }
    }
    document.getElementById("cnt_n").innerText = tn;
    document.getElementById("cnt_u").innerText = tu;
    document.getElementById("cnt_w").innerText = tw;
    document.getElementById("cnt_y").innerText = ty;

    document.getElementById("cnt_bn").innerText = bn;
    document.getElementById("cnt_bu").innerText = bu;
    document.getElementById("cnt_bw").innerText = bw;
    document.getElementById("cnt_by").innerText = by;
    document.getElementById("cnt_bt").innerText = bn + bu + bw + by;

    document.getElementById("cnt_nbn").innerText = tn - bn;
    document.getElementById("cnt_nbu").innerText = tu - bu;
    document.getElementById("cnt_nbw").innerText = tw - bw;
    document.getElementById("cnt_nby").innerText = ty - by;
    document.getElementById("cnt_nbt").innerText = tn - bn + tu - bu + tw - bw + ty - by;
}

function updt_output() {
    var b = document.getElementById("sh_b").checked;
    var i, y = "", w = "", u = "", n = "";

    for (i = 1; i < nb_patches; i++) {
        if (i < review)
            continue;
        if (bkp[i])
            continue;
        if (document.getElementById("bt_" + i + "_y").checked)
            y = y + " " + cid[i];
        else if (document.getElementById("bt_" + i + "_w").checked)
            w = w + " " + cid[i];
        else if (document.getElementById("bt_" + i + "_u").checked)
            u = u + " " + cid[i];
        else if (document.getElementById("bt_" + i + "_n").checked)
            n = n + " " + cid[i];
    }

    // update the textarea
    document.getElementById("output").value =
        "cid_y=(" + y + " )\n" +
        "cid_w=(" + w + " )\n" +
        "cid_u=(" + u + " )\n" +
        "cid_n=(" + n + " )\n";
}

function updt(line,value) {
    if (value == "r") {
        review = line;
        line = 0; // redraw everything
    }
    updt_table(line);
    updt_output();
}

function show_only(b,n,u,w,y) {
    document.getElementById("sh_b").checked = !!b;
    document.getElementById("sh_n").checked = !!n;
    document.getElementById("sh_u").checked = !!u;
    document.getElementById("sh_w").checked = !!w;
    document.getElementById("sh_y").checked = !!y;
    document.getElementById("show_all").checked = true;
    updt(0,"r");
}

// -->
</script>
</HEAD>
EOF

echo "<BODY>"
echo -n "<table cellpadding=3 cellspacing=5 style='font-size: 150%;'><tr><th align=left>Backported</th>"
echo -n "<td style='background-color:$BG_N'><a href='#' onclick='show_only(1,1,0,0,0);'> N: <span id='cnt_bn'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_U'><a href='#' onclick='show_only(1,0,1,0,0);'> U: <span id='cnt_bu'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_W'><a href='#' onclick='show_only(1,0,0,1,0);'> W: <span id='cnt_bw'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_Y'><a href='#' onclick='show_only(1,0,0,0,1);'> Y: <span id='cnt_by'>0</span> </a></td>"
echo -n "<td>total: <span id='cnt_bt'>0</span></td>"
echo "</tr><tr>"
echo -n "<th align=left>Not backported</th>"
echo -n "<td style='background-color:$BG_N'><a href='#' onclick='show_only(0,1,0,0,0);'> N: <span id='cnt_nbn'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_U'><a href='#' onclick='show_only(0,0,1,0,0);'> U: <span id='cnt_nbu'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_W'><a href='#' onclick='show_only(0,0,0,1,0);'> W: <span id='cnt_nbw'>0</span> </a></td>"
echo -n "<td style='background-color:$BG_Y'><a href='#' onclick='show_only(0,0,0,0,1);'> Y: <span id='cnt_nby'>0</span> </a></td>"
echo -n "<td>total: <span id='cnt_nbt'>0</span></td>"
echo "</tr></table><P/>"
echo -n "<big><big>Show:"
echo -n " <span style='background-color:$BG_B'><input type='checkbox' onclick='updt_table(0);' id='sh_b' checked />B (${#bkp[*]})</span> "
echo -n " <span style='background-color:$BG_N'><input type='checkbox' onclick='updt_table(0);' id='sh_n' checked />N (<span id='cnt_n'>0</span>)</span> "
echo -n " <span style='background-color:$BG_U'><input type='checkbox' onclick='updt_table(0);' id='sh_u' checked />U (<span id='cnt_u'>0</span>)</span> "
echo -n " <span style='background-color:$BG_W'><input type='checkbox' onclick='updt_table(0);' id='sh_w' checked />W (<span id='cnt_w'>0</span>)</span> "
echo -n " <span style='background-color:$BG_Y'><input type='checkbox' onclick='updt_table(0);' id='sh_y' checked />Y (<span id='cnt_y'>0</span>)</span> "
echo -n "</big></big><br/>(B=show backported, N=no/drop, U=uncertain, W=wait/next, Y=yes/pick"
echo ")<P/>"

echo "<TABLE COLS=5 BORDER=1 CELLSPACING=0 CELLPADDING=3>"
|
|
||||||
echo "<TR><TH>All<br/><input type='radio' name='review' id='show_all' onclick='updt(0,\"r\");' checked title='Start review here'/></TH><TH>CID</TH><TH>Subject</TH><TH>Verdict<BR>N U W Y</BR></TH><TH>Reason</TH></TR>"
|
|
||||||
seq_num=1; do_check=1; review=0;
|
|
||||||
for patch in "${PATCHES[@]}"; do
|
|
||||||
# try to retrieve the patch's numbering (0001-9999)
|
|
||||||
pnum="${patch##*/}"
|
|
||||||
pnum="${pnum%%[^0-9]*}"
|
|
||||||
|
|
||||||
id=$(sed -ne 's/^#id: \(.*\)/\1/p' "$patch")
|
|
||||||
resp=$(grep -v ^llama "$patch" | sed -ne '/^Explanation:/,$p' | sed -z 's/\n[\n]*/\n/g' | sed -z 's/\([^. ]\)\n\([A-Z]\)/\1.\n\2/' | tr '\012' ' ')
|
|
||||||
resp="${resp#Explanation:}";
|
|
||||||
while [ -n "$resp" -a -z "${resp##[ .]*}" ]; do
|
|
||||||
resp="${resp#[ .]}"
|
|
||||||
done
|
|
||||||
|
|
||||||
respl=$(echo -- "$resp" | tr 'A-Z' 'a-z')
|
|
||||||
|
|
||||||
if [[ "${respl}" =~ (conclusion|verdict)[:\ ][^.]*yes ]]; then
|
|
||||||
verdict=yes
|
|
||||||
elif [[ "${respl}" =~ (conclusion|verdict)[:\ ][^.]*wait ]]; then
|
|
||||||
verdict=wait
|
|
||||||
elif [[ "${respl}" =~ (conclusion|verdict)[:\ ][^.]*no ]]; then
|
|
||||||
verdict=no
|
|
||||||
elif [[ "${respl}" =~ (conclusion|verdict)[:\ ][^.]*uncertain ]]; then
|
|
||||||
verdict=uncertain
|
|
||||||
elif [[ "${respl}" =~ (\"wait\"|\"yes\"|\"no\"|\"uncertain\")[^\"]*$ ]]; then
|
|
||||||
# last word under quotes in the response, sometimes happens as
|
|
||||||
# in 'thus I would conclude "no"'.
|
|
||||||
verdict=${BASH_REMATCH[1]}
|
|
||||||
else
|
|
||||||
verdict=uncertain
|
|
||||||
fi
|
|
||||||
|
|
||||||
verdict="${verdict//[\"\',;:. ]}"
|
|
||||||
verdict=$(echo -n "$verdict" | tr '[A-Z]' '[a-z]')
|
|
||||||
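
        # Editor's note (illustration, not in the original script): the
        # patterns above match lines such as "Conclusion: yes" as well as
        # looser phrasings like "verdict: probably no", while the quoted-word
        # fallback catches endings such as 'so I would conclude "wait"'.
        # These sample sentences are invented for illustration.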

        # There are two formats for the ID line:
        #   - old: #id: cid subject
        #   - new: #id: cid author date subject
        # We can detect the 2nd one as the date starts with a series of digits
        # followed by "-" then an upper case letter (eg: "18-Dec23").
        set -- $id
        cid="$1"
        author=""
        date=""
        if [ -n "$3" ] && [ -z "${3##[1-9]-[A-Z]*}" -o -z "${3##[0-3][0-9]-[A-Z]*}" ]; then
                author="$2"
                date="$3"
                subj="${id#$cid $author $date }"
        else
                subj="${id#$cid }"
        fi

        if [ -z "$cid" ]; then
                echo "ERROR: commit ID not found in patch $pnum: $patch" >&2
                continue
        fi
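
        # Editor's illustration (hypothetical commit ID, author and subject,
        # not from the original script): the two accepted ID lines would be:
        #   #id: 1a2b3c4d BUG/MINOR: foo: fix crash on reload
        #   #id: 1a2b3c4d JohnDoe 18-Dec23 BUG/MINOR: foo: fix crash on reload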

        echo "<script type='text/javascript'>cid[$seq_num]='$cid'; bkp[$seq_num]=${bkp[$cid]:+1}+0;</script>"

        echo -n "<TR id='tr_$seq_num' name='$cid'"

        # highlight unqualified docs and bugs
        if [ "$verdict" != "no" ]; then
                : # no special treatment for accepted/uncertain elements
        elif [ -z "${subj##BUG*}" ] && ! [[ "${respl}" =~ (explicitly|specifically|clearly|also|commit\ message|does)[\ ]*(state|mention|say|request) ]]; then
                # bold for BUG marked "no" with no "explicitly states that ..."
                echo -n " style='font-weight:bold'"
        elif [ -z "${subj##DOC*}" ]; then # && ! [[ "${respl}" =~ (explicitly|specifically|clearly|also|commit\ message|does)[\ ]*(state|mention|say|request) ]]; then
                # gray for DOC marked "no"
                echo -n " style='font-weight:bold'"
                #echo -n " bgcolor=#E0E0E0" #"$BG_U"
        fi

        echo -n ">"

        # HTMLify subject and summary
        subj="${subj//&/&amp;}"; subj="${subj//</&lt;}"; subj="${subj//>/&gt;}";
        resp="${resp//&/&amp;}"; resp="${resp//</&lt;}"; resp="${resp//>/&gt;}";

        # turn "#XXXX" into a link to an issue
        resp=$(echo "$resp" | sed -e "s|#\([0-9]\{1,5\}\)|<a href='${ISSUES}\1'>#\1</a>|g")

        # put links to commit IDs
        resp=$(echo "$resp" | sed -e "s|\([0-9a-f]\{7,40\}\)|<a href='${GITURL}\1'>\1</a>|g")

        echo -n "<TD nowrap align=center ${bkp[$cid]:+style='background-color:${BG_B}'}>$seq_num<BR/>"
        echo -n "<input type='radio' name='review' onclick='updt($seq_num,\"r\");' ${do_check:+checked} title='Start review here'/></TD>"
        echo -n "<TD nowrap ${bkp[$cid]:+style='background-color:${BG_B}'}><tt><a href='${GITURL}${cid}'>$cid</a></tt>${date:+<br/><small style='font-weight:normal'>$date</small>}</TD>"
        echo -n "<TD nowrap><a href='${GITURL}${cid}'>${pnum:+$pnum }$subj</a>${author:+<br/><div align=right><small style='font-weight:normal'>$author</small></div>}</TD>"
        echo -n "<TD nowrap align=center>"
        echo -n "<input type='radio' onclick='updt($seq_num,\"n\");' id='bt_${seq_num}_n' class='n' name='$cid' value='n' title='Drop' $( [ "$verdict" != no ] || echo -n checked) />"
        echo -n "<input type='radio' onclick='updt($seq_num,\"u\");' id='bt_${seq_num}_u' class='u' name='$cid' value='u' title='Uncertain' $( [ "$verdict" != uncertain ] || echo -n checked) />"
        echo -n "<input type='radio' onclick='updt($seq_num,\"w\");' id='bt_${seq_num}_w' class='w' name='$cid' value='w' title='wait in -next' $([ "$verdict" != wait ] || echo -n checked) />"
        echo -n "<input type='radio' onclick='updt($seq_num,\"y\");' id='bt_${seq_num}_y' class='y' name='$cid' value='y' title='Pick' $( [ "$verdict" != yes ] || echo -n checked) />"
        echo -n "</TD>"
        echo -n "<TD>$resp</TD>"
        echo "</TR>"
        echo
        ((seq_num++))

        # if this patch was already backported, make the review start on the next one
        if [ -n "${bkp[$cid]}" ]; then
                review=$seq_num
                do_check=1
        else
                do_check=
        fi
done

echo "<TR><TH>New<br/><input type='radio' name='review' onclick='updt($seq_num,\"r\");' ${do_check:+checked} title='Nothing to backport'/></TH><TH>CID</TH><TH>Subject</TH><TH>Verdict<BR>N U W Y</BR></TH><TH>Reason</TH></TR>"

echo "</TABLE>"
echo "<P/>"
echo "<H3>Output:</H3>"
echo "<textarea cols=120 rows=10 id='output'></textarea>"
echo "<P/>"
echo "<script type='text/javascript'>nb_patches=$seq_num; review=$review; updt_table(0); updt_output();</script>"
echo "</BODY></HTML>"
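
# Editor's sketch (assumption, not part of the diff): the "Output:" textarea
# above emits shell array assignments, so a reviewer could paste them into a
# file and consume them like this (file name and workflow are hypothetical):
#
#   . verdicts.txt     # defines the cid_y, cid_w, cid_u, cid_n arrays
#   for cid in "${cid_y[@]}"; do git cherry-pick -x "$cid"; done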