diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a36e9df6de..2892c66970 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -34,7 +34,8 @@ concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.ref_name }} cancel-in-progress: true -permissions: {} +permissions: + pull-requests: write jobs: packages: diff --git a/.github/workflows/dispatch-kola-tests.yaml b/.github/workflows/dispatch-kola-tests.yaml index c970711d30..edc9ca96b3 100644 --- a/.github/workflows/dispatch-kola-tests.yaml +++ b/.github/workflows/dispatch-kola-tests.yaml @@ -18,7 +18,8 @@ on: You can e.g. get this from a run's URL - https://github.com/flatcar/scripts/actions/runs/ . -permissions: {} +permissions: + pull-requests: write jobs: test: diff --git a/.github/workflows/run-kola-tests.yaml b/.github/workflows/run-kola-tests.yaml index 82fe53dc34..99db29bab4 100644 --- a/.github/workflows/run-kola-tests.yaml +++ b/.github/workflows/run-kola-tests.yaml @@ -26,6 +26,8 @@ jobs: fail-fast: false matrix: arch: ["amd64", "arm64"] + permissions: + pull-requests: write steps: - name: Prepare machine @@ -196,7 +198,6 @@ jobs: mv flatcar_test_update.gz scripts/ - - name: Run tests shell: bash run: | @@ -235,9 +236,8 @@ jobs: export MAX_RETRIES=5 export SKIP_COPY_TO_BINCACHE=1 - # run the test. + # run the tests. test_run ${{ matrix.arch }} qemu_uefi - test_run ${{ matrix.arch }} qemu_update # Stop the background webserver @@ -245,6 +245,26 @@ jobs: kill %1 set -e + - name: Create Test Summary + if: always() + shell: bash + run: | + exec 2>&1 + set -x + set -euo pipefail + + # qemu_update report includes all results of qemu_uefi as test results are + # stored in a temporary sqlite DB in scripts/ which is not deleted between test runs. 
+ cp scripts/results-qemu_update.md ./test-results.md + cat test-results.md >> "$GITHUB_STEP_SUMMARY" + + - name: If started from a PR, post test summary to PR + if: ${{ github.event_name == 'pull_request' }} + uses: mshick/add-pr-comment@v2 + with: + if: always() + message-path: "test-results.md" + - name: Upload artifacts if: always() uses: actions/upload-artifact@v3 @@ -252,28 +272,8 @@ jobs: name: ${{ matrix.arch }}-test-results path: | scripts/__TESTS__/*/_kola_temp/ + scripts/__TESTS__/*/*.tap + scripts/__TESTS__/*/*.txt scripts/results-*.tap - - - name: Patch TAP reports so test-summary can parse - if: always() - shell: bash - run: | - exec 2>&1 - set -x - set -euo pipefail - - cd scripts - for tap in results-*.tap; do - sumtap="test-summary-${tap}" - # If this is missing then test-summary assumes the TAP report - # is, in fact, XML. - # See https://github.com/flatcar/scripts/pull/696#discussion_r1151027499 - echo "TAP version 13" > "${sumtap}" - cat "${tap}" >> "${sumtap}" - done - - - name: Create Test Summary - if: always() - uses: test-summary/action@v2 - with: - paths: "scripts/test-summary-results-*.tap" + scripts/results-*.md + test-results.md diff --git a/ci-automation/ci-config.env b/ci-automation/ci-config.env index fb7b476a57..944625fe45 100644 --- a/ci-automation/ci-config.env +++ b/ci-automation/ci-config.env @@ -47,6 +47,11 @@ CONTAINER_IMAGE_ROOT="/home/sdk/build/images" # echo "export PARALLEL_TESTS=\"5\"" > sdk_container/.env # to override the number of test cases to be run in parallel. 
+# -- General -- + +# "tap" for TAP reports, "md" for markdown are currently supported +TEST_REPORT_FORMATS=("tap" "md") + # -- QEMU -- QEMU_IMAGE_NAME=${QEMU_IMAGE_NAME:-flatcar_production_image.bin} diff --git a/ci-automation/tapfile_helper_lib.sh b/ci-automation/tapfile_helper_lib.sh index bbb7c6c4c5..8c837d8315 100644 --- a/ci-automation/tapfile_helper_lib.sh +++ b/ci-automation/tapfile_helper_lib.sh @@ -197,27 +197,167 @@ function tap_failed_tests_for_vendor() { } # -- +# TAP output format primitives for tap_generate_report() + +__tap_print_header() { + local arch="$1" + local version="$2" + local vendors="$3" + local count="$4" + + # We use count + 1 here because the very first "test result" will just print + # the list of platforms tested, not an actual test's result. + echo "1..$((count+1))" + echo "ok - Version: ${version}, Architecture: ${arch}" + echo " ---" + echo " Platforms tested: ${vendors}" + echo " ..." +} +# -- + +__tap_print_test_verdict() { + local verdict="$1" + local name="$2" + local succeded_vendors="$3" + local failed_vendors="$4" + + echo "${verdict} - ${test_name}" + echo " ---" + + if [ -n "${succeded_vendors}" ] ; then + echo " Succeeded: ${succeded_vendors}" + fi + if [ -n "${failed_vendors}" ] ; then + echo " Failed: ${failed_vendors}" + fi +} +# -- + +__tap_print_test_run_diag_output() { + local vendor="$1" + local run="$2" + echo " Error messages for ${vendor}, run ${run}:" + cat - +} +# -- + +__tap_finish_test_verdict() { + local verdict="$1" + local name="$2" + local succeded_vendors="$3" + local failed_vendors="$4" + echo " ..." 
+} +# -- + +__tap_finish_test_report() { + true +} +# -- + +# markdown output format primitives for tap_generate_report() + +__md_print_header() { + local arch="$1" + local version="$2" + local vendors="$3" + local count="$4" + + echo "### Test report for ${version} / ${arch}" + echo + echo "**Platforms tested** : ${vendors}" +} +# -- + +__md_print_test_verdict() { + local verdict="$1" + local name="$2" + local succeded_vendors="$3" + local failed_vendors="$4" + + v="![${verdict}](https://via.placeholder.com/50x20/00ff00/000000?text=PASS)" + if [ "${verdict}" = "not ok" ] ; then + v="![${verdict}](https://via.placeholder.com/50x20/ff0000/ffffff?text=FAIL)" + fi + + echo + echo -n "${v} **${name}**" + if [ -n "${succeded_vendors}" ] ; then + echo -n " 🟢 Succeeded: ${succeded_vendors}" + fi + if [ -n "${failed_vendors}" ] ; then + echo -n " ❌ Failed: ${failed_vendors}" + fi + echo + if [ "${verdict}" = "not ok" ] ; then + echo + echo "<details>" + echo + fi +} +# -- + +__md_print_test_run_diag_output() { + local vendor="$1" + local run="$2" + + echo "* Diagnostic output for ${vendor}, run ${run}" + echo + echo " \`\`\`" + cat - + echo " \`\`\`" + echo + +} +# -- +# +__md_finish_test_verdict() { + local verdict="$1" + local name="$2" + local succeded_vendors="$3" + local failed_vendors="$4" + if [ "${verdict}" = "not ok" ] ; then + echo + echo "</details>" + echo + fi +} +# -- + +__md_finish_test_report() { + true +} +# -- + + # Print the tap file from contents of the database. # INPUT: # 1: - Architecture to be included in the first line of the report # 2: - OS version tested, to be included in the first line of the report -# 3: - If set to "true" then debug output of transient test failures +# 3: - Output format of the report. "tap" and "md" are supported. +# 4: - If set to "true" then debug output of transient test failures # is included in the result report. function tap_generate_report() { local arch="$1" local version="$2" - local full_error_report="${3:-false}" + local format="$3" + local full_error_report="${4:-false}" + + case "${format}" in + tap) ;; + md) ;; + *) echo "ERROR: tap_generate_report() unknown format '${format}'" >&2 + return 1 + ;; + esac + local count count="$(__sqlite3_wrapper 'SELECT count(name) FROM test_case;')" local vendors vendors="$(__sqlite3_wrapper 'SELECT name FROM vendor;' | tr '\n' ' ')" - echo "1..$((count+1))" - echo "ok - Version: ${version}, Architecture: ${arch}" - echo " ---" - echo " Platforms tested: ${vendors}" - echo " ..." + __"${format}"_print_header "${arch}" "${version}" "${vendors}" "${count}" # Print result line for every test, including platforms it succeeded on # and transient failed runs. @@ -265,21 +405,17 @@ function tap_generate_report() { r=r ", " $2 else r="(" $2 ; } - END { if (t) print t r ")"; }' + END { if (t) print t " " r ")"; }' } - local succeded - succeded="$(list_runs 1)" + local succeeded + succeeded="$(list_runs 1)" local failed failed="$(list_runs 0)" - echo "${verdict} - ${test_name}" - echo " ---" - if [ -n "${succeded}" ] ; then - echo " Succeeded: ${succeded}" - fi + __"${format}"_print_test_verdict "${verdict}" "${test_name}" \ + "${succeeded}" "${failed}" if [ -n "${failed}" ] ; then - echo " Failed: ${failed}" if [ "${verdict}" = "not ok" -o "${full_error_report}" = "true" ] ; then # generate diagnostic output, per failed run. 
__sqlite3_wrapper -csv " @@ -291,7 +427,7 @@ function tap_generate_report() { ORDER BY t.run DESC;" | \ sed 's/,/ /' | \ while read -r vendor run; do - echo " Error messages for ${vendor}, run ${run}:" + { __sqlite3_wrapper -csv " SELECT t.output FROM test_run AS t, test_case AS c WHERE t.case_id=c.id @@ -299,10 +435,14 @@ function tap_generate_report() { AND t.run='${run}';" | \ sed 's/"/ /g' | \ awk '{print " L" NR ": \"" $0 "\""}' + } | __"${format}"_print_test_run_diag_output "${vendor}" "${run}" done fi fi - echo " ..." + __"${format}"_finish_test_verdict "${verdict}" "${test_name}" \ + "${succeeded}" "${failed}" done + + __"${format}"_finish_test_report } # -- diff --git a/ci-automation/test.sh b/ci-automation/test.sh index c9ec153a3d..2082aa83dd 100644 --- a/ci-automation/test.sh +++ b/ci-automation/test.sh @@ -165,8 +165,8 @@ function _test_run_impl() { # Make the torcx artifacts available to test implementation __prepare_torcx "${arch}" "${vernum}" "${work_dir}" - local tap_merged_summary="results-${image}.tap" - local tap_merged_detailed="results-${image}-detailed.tap" + local tap_merged_summary="results-${image}" + local tap_merged_detailed="results-${image}-detailed" local retry="" local success=false local print_give_up=true @@ -242,9 +242,9 @@ function _test_run_impl() { copy_to_buildcache "testing/${vernum}/${arch}/${image}" \ "${tests_dir}/"*.tap copy_to_buildcache "testing/${vernum}/${arch}/${image}" \ - "${tap_merged_summary}" + "${tap_merged_summary}"* copy_to_buildcache "testing/${vernum}/${arch}/${image}" \ - "${tap_merged_detailed}" + "${tap_merged_detailed}"* fi if ! 
$success; then return 1 diff --git a/ci-automation/test_update_reruns.sh b/ci-automation/test_update_reruns.sh index 2fcd79e543..0cd91f9ad4 100755 --- a/ci-automation/test_update_reruns.sh +++ b/ci-automation/test_update_reruns.sh @@ -19,8 +19,12 @@ failfile="$6" merged_summary="$7" merged_detailed="$8" +source ci-automation/ci-config.env source ci-automation/tapfile_helper_lib.sh tap_ingest_tapfile "${tapfile}" "${image}" "${retry}" tap_failed_tests_for_vendor "${image}" > "${failfile}" -tap_generate_report "${arch}" "${vernum}" > "${merged_summary}" -tap_generate_report "${arch}" "${vernum}" "true" > "${merged_detailed}" + +for format in "${TEST_REPORT_FORMATS[@]}"; do + tap_generate_report "${arch}" "${vernum}" "${format}" > "${merged_summary}.${format}" + tap_generate_report "${arch}" "${vernum}" "${format}" "true" > "${merged_detailed}.${format}" +done