diff --git a/ci-automation/ci-config.env b/ci-automation/ci-config.env
index 2c2672758f..2282b66b6f 100644
--- a/ci-automation/ci-config.env
+++ b/ci-automation/ci-config.env
@@ -95,3 +95,10 @@ GCE_PARALLEL="${PARALLEL_TESTS:-4}"
 DIGITALOCEAN_PARALLEL="${PARALLEL_TESTS:-8}"
 # DIGITALOCEAN_TOKEN_JSON env var is used for credentials, and should
 # come from sdk_container/.env. It must be base64-encoded.
+
+# -- VMware ESX --
+
+: ${VMWARE_ESX_IMAGE_NAME:='flatcar_production_vmware_ova.ova'}
+VMWARE_ESX_PARALLEL="${PARALLEL_TESTS:-4}"
+# VMWARE_ESX_CREDS should come from sdk_container/.env and must be
+# base64-encoded.
diff --git a/ci-automation/vendor-testing/vmware.sh b/ci-automation/vendor-testing/vmware.sh
new file mode 100755
index 0000000000..c0c57a6c30
--- /dev/null
+++ b/ci-automation/vendor-testing/vmware.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+# Copyright (c) 2022 The Flatcar Maintainers.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -euo pipefail
+
+# Test execution script for the VMware ESX vendor image.
+# This script is supposed to run in the mantle container.
+
+source ci-automation/vendor_test.sh
+
+# We never ran VMware ESX on arm64, so for now fail it as an
+# unsupported option.
+if [[ "${CIA_ARCH}" == "arm64" ]]; then
+    echo "1..1" > "${CIA_TAPFILE}"
+    echo "not ok - all VMware ESX tests" >> "${CIA_TAPFILE}"
+    echo "  ---" >> "${CIA_TAPFILE}"
+    echo "  ERROR: ARM64 tests not supported on VMware ESX." | tee -a "${CIA_TAPFILE}"
+    echo "  ..." >> "${CIA_TAPFILE}"
+    break_retest_cycle
+    exit 1
+fi
+
+# Fetch image if not present.
+if [ -f "${VMWARE_ESX_IMAGE_NAME}" ] ; then
+    echo "++++ ${CIA_TESTSCRIPT}: Using existing ${work_dir}/${VMWARE_ESX_IMAGE_NAME} for testing ${CIA_VERNUM} (${CIA_ARCH}) ++++"
+else
+    echo "++++ ${CIA_TESTSCRIPT}: downloading ${VMWARE_ESX_IMAGE_NAME} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
+    copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${VMWARE_ESX_IMAGE_NAME}" .
+fi
+
+config_file=''
+secret_to_file config_file "${VMWARE_ESX_CREDS}"
+
+# If we are using static IPs, then delete every VM that is running
+# because we'll use all available spots. This is to avoid entering a
+# broken state if there are some left-over VMs from manual usage or a
+# forcefully terminated job.
+#
+# The assumption here is that we can do it without any interference
+# with other CI VMs, because we have acquired a resource lock on those
+# VMs.
+static_ips="$(jq '.["default"]["static_ips"]' "${config_file}")"
+if [[ "${static_ips}" -ne 0 ]]; then
+    ore esx --esx-config-file "${config_file}" remove-vms || :
+fi
+
+kola_test_basename="ci-${CIA_VERNUM//+/-}"
+
+trap 'ore esx --esx-config-file "${config_file}" remove-vms \
+    --pattern "${kola_test_basename}*" || :' EXIT
+
+set -x
+
+sudo timeout --signal=SIGQUIT 2h kola run \
+    --board="${CIA_ARCH}-usr" \
+    --basename="${kola_test_basename}" \
+    --channel="${CIA_CHANNEL}" \
+    --platform=esx \
+    --tapfile="${CIA_TAPFILE}" \
+    --parallel="${VMWARE_ESX_PARALLEL}" \
+    --torcx-manifest="${CIA_TORCX_MANIFEST}" \
+    --esx-config-file "${config_file}" \
+    --esx-ova-path "${VMWARE_ESX_IMAGE_NAME}" \
+    "${@}"
+
+set +x
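
Note on the VMWARE_ESX_CREDS secret consumed above: the script base64-decodes it
into a JSON config file via secret_to_file and reads the "static_ips" count from
its "default" profile with jq. Below is a minimal sketch of how such a secret
could be prepared for sdk_container/.env; only the "default" profile and its
"static_ips" key are implied by the script, and the remaining ESX connection
settings (server address, credentials, and so on) follow mantle's ESX platform
schema and are not spelled out here.

# Assumed example file name; only the "default" profile and its "static_ips"
# key are confirmed by the script above. Fill in the remaining ESX connection
# settings required by mantle before encoding.
cat > esx-config.json <<'EOF'
{
  "default": {
    "static_ips": 0
  }
}
EOF

# ci-config.env expects the secret base64-encoded on a single line in
# sdk_container/.env (GNU coreutils base64).
echo "VMWARE_ESX_CREDS=$(base64 -w0 esx-config.json)" >> sdk_container/.env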