tapfile_helper ff.: support TAP and Markdown output

This change adds markdown output support to tapfile helper.
tap_generate_report() has been refactored to use low-level output
functions to write tests; TAP and markdown output is supported and both
are generated by default. Also, it should be straightforward to add
other output formats by implementing the respective low level print
functions.

The markdown output is now used by run-kola-tests.yaml to generate step
output and, if run from a PR, add a comment with test results to the PR.

Signed-off-by: Thilo Fromm <thilofromm@microsoft.com>
This commit is contained in:
Thilo Fromm 2023-03-29 12:59:34 +02:00
parent ab2000e3e4
commit f07cb5f781
7 changed files with 204 additions and 53 deletions

View File

@ -34,7 +34,8 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref_name }}
cancel-in-progress: true
permissions: {}
permissions:
pull-requests: write
jobs:
packages:

View File

@ -18,7 +18,8 @@ on:
You can e.g. get this from a run's URL -
https://github.com/flatcar/scripts/actions/runs/<ID> .
permissions: {}
permissions:
pull-requests: write
jobs:
test:

View File

@ -26,6 +26,8 @@ jobs:
fail-fast: false
matrix:
arch: ["amd64", "arm64"]
permissions:
pull-requests: write
steps:
- name: Prepare machine
@ -196,7 +198,6 @@ jobs:
mv flatcar_test_update.gz scripts/
- name: Run tests
shell: bash
run: |
@ -235,9 +236,8 @@ jobs:
export MAX_RETRIES=5
export SKIP_COPY_TO_BINCACHE=1
# run the test.
# run the tests.
test_run ${{ matrix.arch }} qemu_uefi
test_run ${{ matrix.arch }} qemu_update
# Stop the background webserver
@ -245,6 +245,26 @@ jobs:
kill %1
set -e
- name: Create Test Summary
if: always()
shell: bash
run: |
exec 2>&1
set -x
set -euo pipefail
# qemu_update report includes all results of qemu_uefi, as test results are
# stored in a temporary sqlite DB in scripts/ which is not deleted between test runs.
cp scripts/results-qemu_update.md ./test-results.md
cat test-results.md >> "$GITHUB_STEP_SUMMARY"
- name: If started from a PR, post test summary to PR
if: ${{ github.event_name == 'pull_request' }}
uses: mshick/add-pr-comment@v2
with:
if: always()
message-path: "test-results.md"
- name: Upload artifacts
if: always()
uses: actions/upload-artifact@v3
@ -252,28 +272,8 @@ jobs:
name: ${{ matrix.arch }}-test-results
path: |
scripts/__TESTS__/*/_kola_temp/
scripts/__TESTS__/*/*.tap
scripts/__TESTS__/*/*.txt
scripts/results-*.tap
- name: Patch TAP reports so test-summary can parse
if: always()
shell: bash
run: |
exec 2>&1
set -x
set -euo pipefail
cd scripts
for tap in results-*.tap; do
sumtap="test-summary-${tap}"
# If this is missing then test-summary assumes the TAP report
# is, in fact, XML.
# See https://github.com/flatcar/scripts/pull/696#discussion_r1151027499
echo "TAP version 13" > "${sumtap}"
cat "${tap}" >> "${sumtap}"
done
- name: Create Test Summary
if: always()
uses: test-summary/action@v2
with:
paths: "scripts/test-summary-results-*.tap"
scripts/results-*.md
test-results.md

View File

@ -47,6 +47,11 @@ CONTAINER_IMAGE_ROOT="/home/sdk/build/images"
# echo "export PARALLEL_TESTS=\"5\"" > sdk_container/.env
# to override the number of test cases to be run in parallel.
# -- General --
# "tap" for TAP reports and "md" for Markdown are currently supported.
TEST_REPORT_FORMATS=("tap" "md")
# -- QEMU --
QEMU_IMAGE_NAME=${QEMU_IMAGE_NAME:-flatcar_production_image.bin}

View File

@ -197,27 +197,167 @@ function tap_failed_tests_for_vendor() {
}
# --
# TAP output format primitives for tap_generate_report()
# Emit the TAP report header: the plan line plus a synthetic first "ok"
# result that carries version/arch and the list of platforms tested.
# $1: architecture, $2: OS version, $3: space-separated vendor list,
# $4: number of actual test cases in the report.
__tap_print_header() {
    local architecture="$1" os_version="$2" platform_list="$3" num_tests="$4"
    # Plan is num_tests + 1: the very first "test result" only reports the
    # set of platforms exercised, not an actual test's outcome.
    printf '1..%d\n' "$(( num_tests + 1 ))"
    printf 'ok - Version: %s, Architecture: %s\n' "${os_version}" "${architecture}"
    printf ' ---\n'
    printf ' Platforms tested: %s\n' "${platform_list}"
    printf ' ...\n'
}
# --
# Print a single test's TAP result line and open its YAML diagnostics block.
# $1: verdict ("ok" or "not ok"), $2: test name,
# $3: space-separated vendors the test succeeded on,
# $4: space-separated vendors the test failed on.
__tap_print_test_verdict() {
    local verdict="$1"
    local name="$2"
    local succeeded_vendors="$3"
    local failed_vendors="$4"
    # Fix: use the "name" parameter ($2) instead of "${test_name}", which
    # only worked by accident via the caller's variable leaking in through
    # bash dynamic scoping.
    echo "${verdict} - ${name}"
    echo " ---"
    if [ -n "${succeeded_vendors}" ] ; then
        echo " Succeeded: ${succeeded_vendors}"
    fi
    if [ -n "${failed_vendors}" ] ; then
        echo " Failed: ${failed_vendors}"
    fi
}
# --
# Emit one failed run's diagnostic output for the TAP report.
# $1: vendor/platform name, $2: run number.
# The diagnostic text itself is read from stdin and forwarded verbatim.
__tap_print_test_run_diag_output() {
    local platform="$1" attempt="$2"
    printf ' Error messages for %s, run %s:\n' "${platform}" "${attempt}"
    cat -
}
# --
# Close the YAML diagnostics block opened by __tap_print_test_verdict().
# Arguments ($1 verdict, $2 name, $3/$4 vendor lists) are accepted for
# interface parity with the other format primitives but are not needed here.
__tap_finish_test_verdict() {
    printf ' ...\n'
}
# --
# TAP reports need no trailer; succeed silently so the format dispatch in
# tap_generate_report() can call this unconditionally.
__tap_finish_test_report() {
    :
}
# --
# markdown output format primitives for tap_generate_report()
# Emit the markdown report header with version, architecture, and the list
# of platforms tested.
# $1: architecture, $2: OS version, $3: space-separated vendor list,
# $4: test count — accepted for interface parity with the TAP primitive;
#     markdown output has no plan line, so it is unused here.
__md_print_header() {
    local target_arch="$1" release="$2" platform_list="$3"
    printf '### Test report for %s / %s\n\n' "${release}" "${target_arch}"
    printf '**Platforms tested** : %s\n' "${platform_list}"
}
# --
# Print a single test's verdict as a markdown line: a PASS/FAIL badge image,
# the bold test name, and the vendor result lists. For failed tests, also
# open a <details> element so the diagnostic output emitted afterwards can
# be collapsed; __md_finish_test_verdict() closes it.
# $1: verdict ("ok" or "not ok"), $2: test name,
# $3: space-separated vendors the test succeeded on,
# $4: space-separated vendors the test failed on.
__md_print_test_verdict() {
    local verdict="$1"
    local name="$2"
    local succeeded_vendors="$3"
    local failed_vendors="$4"
    # Fix: declare the badge variable local so it no longer leaks into the
    # caller's scope.
    local v="![${verdict}](https://via.placeholder.com/50x20/00ff00/000000?text=PASS)"
    if [ "${verdict}" = "not ok" ] ; then
        v="![${verdict}](https://via.placeholder.com/50x20/ff0000/ffffff?text=FAIL)"
    fi
    echo
    echo -n "${v} **${name}**"
    if [ -n "${succeeded_vendors}" ] ; then
        echo -n " 🟢 Succeeded: ${succeeded_vendors}"
    fi
    if [ -n "${failed_vendors}" ] ; then
        echo -n " ❌ Failed: ${failed_vendors}"
    fi
    echo
    if [ "${verdict}" = "not ok" ] ; then
        echo
        echo "<details>"
        echo
    fi
}
# --
# Emit one failed run's diagnostics as a fenced code block under a list
# item naming the vendor and run number.
# $1: vendor/platform name, $2: run number.
# The diagnostic text itself is read from stdin and forwarded verbatim.
__md_print_test_run_diag_output() {
    local platform="$1" attempt="$2"
    printf '* Diagnostic output for %s, run %s\n\n' "${platform}" "${attempt}"
    printf ' ```\n'
    cat -
    printf ' ```\n\n'
}
# --
#
# Close the <details> element that __md_print_test_verdict() opens for
# failed tests; passing tests have nothing to close.
# $1: verdict ("ok" or "not ok"); $2..$4 (name, vendor lists) are accepted
# for interface parity with the other primitives and are unused here.
__md_finish_test_verdict() {
    local result="$1"
    if [[ "${result}" == "not ok" ]] ; then
        printf '\n</details>\n\n'
    fi
}
# --
# Markdown reports need no trailer; succeed silently so the format dispatch
# in tap_generate_report() can call this unconditionally.
__md_finish_test_report() {
    :
}
# --
# Print the tap file from contents of the database.
# INPUT:
# 1: <arch> - Architecture to be included in the first line of the report
# 2: <version> - OS version tested, to be included in the first line of the report
# 3: <include_transient_errors> - If set to "true" then debug output of transient test failures
# 3: <format> - Output format of the report. "tap" and "md" are supported.
# 4: <include_transient_errors> - If set to "true" then debug output of transient test failures
# is included in the result report.
function tap_generate_report() {
local arch="$1"
local version="$2"
local full_error_report="${3:-false}"
local format="$3"
local full_error_report="${4:-false}"
case "${format}" in
tap) ;;
md) ;;
*) echo "ERROR: tap_generate_report() unknown format '${format}'" >&2
return 1
;;
esac
local count
count="$(__sqlite3_wrapper 'SELECT count(name) FROM test_case;')"
local vendors
vendors="$(__sqlite3_wrapper 'SELECT name FROM vendor;' | tr '\n' ' ')"
echo "1..$((count+1))"
echo "ok - Version: ${version}, Architecture: ${arch}"
echo " ---"
echo " Platforms tested: ${vendors}"
echo " ..."
__"${format}"_print_header "${arch}" "${version}" "${vendors}" "${count}"
# Print result line for every test, including platforms it succeeded on
# and transient failed runs.
@ -265,21 +405,17 @@ function tap_generate_report() {
r=r ", " $2
else
r="(" $2 ; }
END { if (t) print t r ")"; }'
END { if (t) print t " " r ")"; }'
}
local succeded
succeded="$(list_runs 1)"
local succeeded
succeeded="$(list_runs 1)"
local failed
failed="$(list_runs 0)"
echo "${verdict} - ${test_name}"
echo " ---"
if [ -n "${succeded}" ] ; then
echo " Succeeded: ${succeded}"
fi
__"${format}"_print_test_verdict "${verdict}" "${test_name}" \
"${succeeded}" "${failed}"
if [ -n "${failed}" ] ; then
echo " Failed: ${failed}"
if [ "${verdict}" = "not ok" -o "${full_error_report}" = "true" ] ; then
# generate diagnostic output, per failed run.
__sqlite3_wrapper -csv "
@ -291,7 +427,7 @@ function tap_generate_report() {
ORDER BY t.run DESC;" | \
sed 's/,/ /' | \
while read -r vendor run; do
echo " Error messages for ${vendor}, run ${run}:"
{
__sqlite3_wrapper -csv "
SELECT t.output FROM test_run AS t, test_case AS c
WHERE t.case_id=c.id
@ -299,10 +435,14 @@ function tap_generate_report() {
AND t.run='${run}';" | \
sed 's/"/ /g' | \
awk '{print " L" NR ": \"" $0 "\""}'
} | __"${format}"_print_test_run_diag_output "${vendor}" "${run}"
done
fi
fi
echo " ..."
__"${format}"_finish_test_verdict "${verdict}" "${test_name}" \
"${succeeded}" "${failed}"
done
__"${format}"_finish_test_report
}
# --

View File

@ -165,8 +165,8 @@ function _test_run_impl() {
# Make the torcx artifacts available to test implementation
__prepare_torcx "${arch}" "${vernum}" "${work_dir}"
local tap_merged_summary="results-${image}.tap"
local tap_merged_detailed="results-${image}-detailed.tap"
local tap_merged_summary="results-${image}"
local tap_merged_detailed="results-${image}-detailed"
local retry=""
local success=false
local print_give_up=true
@ -242,9 +242,9 @@ function _test_run_impl() {
copy_to_buildcache "testing/${vernum}/${arch}/${image}" \
"${tests_dir}/"*.tap
copy_to_buildcache "testing/${vernum}/${arch}/${image}" \
"${tap_merged_summary}"
"${tap_merged_summary}"*
copy_to_buildcache "testing/${vernum}/${arch}/${image}" \
"${tap_merged_detailed}"
"${tap_merged_detailed}"*
fi
if ! $success; then
return 1

View File

@ -19,8 +19,12 @@ failfile="$6"
merged_summary="$7"
merged_detailed="$8"
source ci-automation/ci-config.env
source ci-automation/tapfile_helper_lib.sh
tap_ingest_tapfile "${tapfile}" "${image}" "${retry}"
tap_failed_tests_for_vendor "${image}" > "${failfile}"
tap_generate_report "${arch}" "${vernum}" > "${merged_summary}"
tap_generate_report "${arch}" "${vernum}" "true" > "${merged_detailed}"
for format in "${TEST_REPORT_FORMATS[@]}"; do
tap_generate_report "${arch}" "${vernum}" "${format}" > "${merged_summary}.${format}"
tap_generate_report "${arch}" "${vernum}" "${format}" "true" > "${merged_detailed}.${format}"
done