Copy new pipeline stuff from main branch

Synced to commit 25883519d971d3670f1e188b608da0d13a5e4a84.
Krzesimir Nowak 2022-07-05 17:56:05 +02:00
parent 66e56bd8b3
commit 350b884067
64 changed files with 5542 additions and 52 deletions

.dockerignore Normal file (+2)

@@ -0,0 +1,2 @@
__build__
sdk_container/.cache

.gitmodules vendored Normal file (+6)

@@ -0,0 +1,6 @@
[submodule "sdk_container/src/third_party/coreos-overlay"]
path = sdk_container/src/third_party/coreos-overlay
url = https://github.com/flatcar/coreos-overlay.git
[submodule "sdk_container/src/third_party/portage-stable"]
path = sdk_container/src/third_party/portage-stable
url = https://github.com/flatcar/portage-stable.git

bootstrap_sdk_container Executable file (+79)

@@ -0,0 +1,79 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -eu
cd "$(dirname "$0")"
source sdk_lib/sdk_container_common.sh
seed_version=""
target_version=""
declare -a cleanup
# --
usage() {
echo " Usage:"
echo " $0 <seed-sdk-version> <new-sdk-version> [-x <cleanup-script>]"
echo
echo " This script will bootstrap a new SDK tarball using an SDK container."
echo " '$sdk_container_common_versionfile' will be updated to the target version."
echo
echo " <seed-sdk-vernum> - SDK version number (e.g. '3005.0.0') to use for bootstrapping."
echo " The SDK container will be pulled and the tarball"
echo " downloaded if necessary."
echo " <new-sdk-vernum> - SDK version number (e.g. '3027.0.0') of the new SDK."
echo " -x <cleanup-script> - For each resource generated during build (container etc.)"
echo " add a cleanup line to <script> which, when run, will free"
echo " the resource. Useful for CI."
echo " -h - Print this help."
echo
}
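# A hypothetical invocation (version numbers and the cleanup file name are
# illustrative only):
#   ./bootstrap_sdk_container 3005.0.0 3027.0.0 -x cleanup.sh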
# --
while [ 0 -lt $# ] ; do
case "$1" in
-h) usage; exit 0;;
-x) cleanup=("-x" "$2"); shift; shift;;
*) if [ -z "$seed_version" ] ; then
seed_version="$1"
elif [ -z "$target_version" ] ; then
target_version="$1"
else
echo "ERROR: Spurious positional parameter '$1'"
usage; exit 1;
fi
shift;;
esac
done
if [ -z "$seed_version" -o -z "$target_version" ] ; then
echo "ERROR: Missing seed and /or target SDK version."
usage
exit 1
fi
# --
vernum="$(strip_version_prefix "$target_version")"
if is_official "$vernum" ; then
official="true"
else
official="false"
fi
yell "\n######\n###### Bootstrapping SDK version $target_version from seed ($seed_version)"
if $official; then
export COREOS_OFFICIAL=1
fi
# bootstrap_sdk needs FLATCAR_SDK_VERSION set to the seed version
./run_sdk_container "${cleanup[@]}" -V "$seed_version" -v "$target_version" \
sudo -E ./bootstrap_sdk
# Update versionfile to the actual SDK version
create_versionfile "${target_version}"


@@ -58,10 +58,33 @@ extract_update() {
local image_name="$1"
local disk_layout="$2"
local update_path="${BUILD_DIR}/${image_name%_image.bin}_update.bin"
local digest_path="${update_path}.DIGESTS"
"${BUILD_LIBRARY_DIR}/disk_util" --disk_layout="${disk_layout}" \
extract "${BUILD_DIR}/${image_name}" "USR-A" "${update_path}"
upload_image "${update_path}"
# Compress image
files_to_evaluate+=( "${update_path}" )
declare -a compressed_images
declare -a extra_files
compress_disk_images files_to_evaluate compressed_images extra_files
# Upload compressed image
upload_image -d "${digest_path}" "${compressed_images[@]}" "${extra_files[@]}"
# Upload legacy digests
upload_legacy_digests "${digest_path}" compressed_images
# For production as well as dev builds we generate a dev-key-signed update
# payload for running tests (the signature won't be accepted by production systems).
local update_test="${BUILD_DIR}/flatcar_test_update.gz"
delta_generator \
-private_key "/usr/share/update_engine/update-payload-key.key.pem" \
-new_image "${update_path}" \
-new_kernel "${BUILD_DIR}/${image_name%.bin}.vmlinuz" \
-out_file "${update_test}"
upload_image "${update_test}"
}
zip_update_tools() {
@@ -94,7 +117,18 @@ generate_update() {
-new_kernel "${image_kernel}" \
-out_file "${update}.gz"
upload_image -d "${update}.DIGESTS" "${update}".{bin,gz,zip}
# Compress image
declare -a files_to_evaluate
declare -a compressed_images
declare -a extra_files
files_to_evaluate+=( "${update}.bin" )
compress_disk_images files_to_evaluate compressed_images extra_files
# Upload images
upload_image -d "${update}.DIGESTS" "${update}".{gz,zip} "${compressed_images[@]}" "${extra_files[@]}"
# Upload legacy digests
upload_legacy_digests "${update}.DIGESTS" compressed_images
}
# ldconfig cannot generate caches for non-native arches.


@@ -108,9 +108,21 @@ create_dev_container() {
systemd_enable "${root_fs_dir}" "multi-user.target" "remount-usr.service"
finish_image "${image_name}" "${disk_layout}" "${root_fs_dir}" "${image_contents}"
upload_image -d "${BUILD_DIR}/${image_name}.bz2.DIGESTS" \
declare -a files_to_evaluate
declare -a compressed_images
declare -a extra_files
files_to_evaluate+=( "${BUILD_DIR}/${image_name}" )
compress_disk_images files_to_evaluate compressed_images extra_files
upload_image -d "${BUILD_DIR}/${image_name}.DIGESTS" \
"${BUILD_DIR}/${image_contents}" \
"${BUILD_DIR}/${image_packages}" \
"${BUILD_DIR}/${image_licenses}" \
"${BUILD_DIR}/${image_name}"
"${compressed_images[@]}" \
"${extra_files[@]}"
# Upload legacy digests
upload_legacy_digests "${BUILD_DIR}/${image_name}.DIGESTS" compressed_images
}


@@ -83,7 +83,21 @@ finish_modify_image() {
cleanup_mounts "${ROOT_FS_DIR}"
trap - EXIT
upload_image "${DST_IMAGE}"
declare -a files_to_evaluate
declare -a compressed_images
declare -a extra_files
files_to_evaluate+=( "${DST_IMAGE}" )
compress_disk_images files_to_evaluate compressed_images extra_files
upload_image -d "${DST_IMAGE}.DIGESTS" \
"${compressed_images[@]}" \
"${extra_files[@]}"
# Upload legacy digests
upload_legacy_digests "${DST_IMAGE}.DIGESTS" compressed_images
for filename in "${EXTRA_FILES[@]}"; do
if [[ -e "${BUILD_DIR}/${filename}" ]]; then
upload_image "${BUILD_DIR}/${filename}"


@@ -138,17 +138,27 @@ EOF
"${BUILD_DIR}/${image_contents}"
"${BUILD_DIR}/${image_packages}"
"${BUILD_DIR}/${image_licenses}"
"${BUILD_DIR}/${image_name}"
"${BUILD_DIR}/${image_kernel}"
"${BUILD_DIR}/${image_pcr_policy}"
"${BUILD_DIR}/${image_grub}"
"${BUILD_DIR}/${image_kconfig}"
)
local files_to_evaluate=( "${BUILD_DIR}/${image_name}" )
declare -a compressed_images
declare -a extra_files
compress_disk_images files_to_evaluate compressed_images extra_files
to_upload+=( "${compressed_images[@]}" )
to_upload+=( "${extra_files[@]}" )
# FIXME(bgilbert): no shim on arm64
if [[ -f "${BUILD_DIR}/${image_shim}" ]]; then
to_upload+=("${BUILD_DIR}/${image_shim}")
fi
upload_image -d "${BUILD_DIR}/${image_name}.bz2.DIGESTS" "${to_upload[@]}"
upload_image -d "${BUILD_DIR}/${image_name}.DIGESTS" "${to_upload[@]}"
# Upload legacy digests
upload_legacy_digests "${BUILD_DIR}/${image_name}.DIGESTS" compressed_images
}
create_prod_tar() {


@@ -7,6 +7,7 @@ UPLOAD_ROOT=
UPLOAD_PATH=
TORCX_UPLOAD_ROOT=
UPLOAD_DEFAULT=${FLAGS_FALSE}
DEFAULT_IMAGE_COMPRESSION_FORMAT="bz2"
# Default upload root can be overridden from the environment.
_user="${USER}"
@@ -15,9 +16,6 @@ _user="${USER}"
: ${FLATCAR_TORCX_UPLOAD_ROOT:=${FLATCAR_UPLOAD_ROOT}/torcx}
unset _user
IMAGE_ZIPPER="lbzip2 --compress --keep"
IMAGE_ZIPEXT=".bz2"
DEFINE_boolean parallel ${FLAGS_TRUE} \
"Enable parallelism in gsutil."
DEFINE_boolean upload ${UPLOAD_DEFAULT} \
@@ -42,6 +40,113 @@ DEFINE_string sign "" \
"Sign all files to be uploaded with the given GPG key."
DEFINE_string sign_digests "" \
"Sign image DIGESTS files with the given GPG key."
DEFINE_string image_compression_formats "${DEFAULT_IMAGE_COMPRESSION_FORMAT}" \
"Compress the resulting images using thise formats. This option acceps a list of comma separated values. Options are: none, bz2, gz, zip, zst"
DEFINE_boolean only_store_compressed ${FLAGS_TRUE} \
"Delete input file when compressing, except when 'none' is part of the compression formats or the generic image is the input"
compress_file() {
local filepath="$1"
local compression_format="$2"
[ ! -f "${filepath}" ] && die "Image file ${filepath} does not exist"
[ -z "${compression_format}" ] && die "compression format parameter is mandatory"
case "${compression_format}" in
"none"|"")
echo -n "${filepath}"
return 0
;;
"bz2")
IMAGE_ZIPPER="lbzip2 --compress --keep"
;;
"gz")
IMAGE_ZIPPER="pigz --keep"
;;
"zip")
IMAGE_ZIPPER="pigz --keep --zip"
;;
"zst")
IMAGE_ZIPPER="zstd --format=zstd -k -q --no-progress"
;;
*)
die "Unsupported compression format ${compression_format}"
;;
esac
${IMAGE_ZIPPER} -f "${filepath}" >/dev/null || die "failed to compress ${filepath}"
echo -n "${filepath}.${compression_format}"
}
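# A sketch of how compress_file behaves, assuming lbzip2 is installed and the
# image exists (file name is illustrative):
#   out="$(compress_file "${BUILD_DIR}/flatcar_production_image.bin" bz2)"
#   # creates flatcar_production_image.bin.bz2 next to the input and echoes its
#   # path, so ${out} ends in ".bz2"; format "none" echoes the input path back.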
compress_disk_images() {
# An array of files that are to be evaluated and possibly compressed if images are
# among them.
local -n local_files_to_evaluate="$1"
# An array that will hold the path on disk to the resulting disk image archives.
# Multiple compression formats may be requested, so this array may hold
# multiple archives for the same image.
local -n local_resulting_archives="$2"
# Files that did not match the filter for disk images.
local -n local_extra_files="$3"
info "Compressing images"
# We want to compress images, but we also want to remove the uncompressed files
# from the list of uploadable files.
for filename in "${local_files_to_evaluate[@]}"; do
if [[ "${filename}" =~ \.(img|bin|vdi|vhd|vmdk)$ ]]; then
# Parse the formats as an array. This will yield an extra empty
# array element at the end.
readarray -td, FORMATS<<<"${FLAGS_image_compression_formats},"
# unset the last element
unset 'FORMATS[-1]'
# An associative array in which we set an element whenever we process a
# format, so the same format is not processed twice - a "uniq" for array elements.
declare -A processed_format
for format in "${FORMATS[@]}";do
if [ -z "${processed_format[${format}]}" ]; then
info "Compressing ${filename##*/} to ${format}"
COMPRESSED_FILENAME=$(compress_file "${filename}" "${format}")
local_resulting_archives+=( "$COMPRESSED_FILENAME" )
processed_format["${format}"]=1
fi
done
# If requested, delete the input file after compression (only if 'none' is not part of the formats)
# Exclude the generic image and update payload because they are needed for generating other formats
if [ "${FLAGS_only_store_compressed}" -eq "${FLAGS_TRUE}" ] &&
[ "${filename##*/}" != "flatcar_production_image.bin" ] &&
[ "${filename##*/}" != "flatcar_production_update.bin" ] &&
! echo "${FORMATS[@]}" | grep -q "none"; then
rm "${filename}"
fi
else
local_extra_files+=( "${filename}" )
fi
done
}
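# Illustrative call (array names are arbitrary; they are passed by name and
# bound via 'local -n' above):
#   declare -a candidates=( "${BUILD_DIR}/some_image.bin" "${BUILD_DIR}/notes.txt" )
#   declare -a archives leftovers
#   compress_disk_images candidates archives leftovers
#   # archives now holds one entry per requested format (e.g. some_image.bin.bz2);
#   # leftovers holds notes.txt, which did not match the disk image filter.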
upload_legacy_digests() {
[[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0
local local_digest_file="$1"
local -n local_compressed_files="$2"
[[ "${#local_compressed_files[@]}" -gt 0 ]] || return 0
# Upload legacy digests
declare -a digests_to_upload
for file in "${local_compressed_files[@]}";do
legacy_digest_file="${file}.DIGESTS"
cp "${local_digest_file}" "${legacy_digest_file}"
digests_to_upload+=( "${legacy_digest_file}" )
done
local def_upload_path="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
upload_files "digests" "${def_upload_path}" "" "${digests_to_upload[@]}"
}
check_gsutil_opts() {
[[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0
@@ -209,15 +314,7 @@ upload_image() {
if [[ ! -f "${filename}" ]]; then
die "File '${filename}' does not exist!"
fi
# Compress disk images
if [[ "${filename}" =~ \.(img|bin|vdi|vhd|vmdk)$ ]]; then
info "Compressing ${filename##*/}"
$IMAGE_ZIPPER -f "${filename}"
uploads+=( "${filename}${IMAGE_ZIPEXT}" )
else
uploads+=( "${filename}" )
fi
uploads+=( "${filename}" )
done
if [[ -z "${digests}" ]]; then


@@ -167,6 +167,10 @@ get_sdk_libdir() {
portageq envvar "LIBDIR_$(get_sdk_arch)"
}
get_sdk_symlink_lib() {
portageq envvar "SYMLINK_LIB"
}
# Usage: get_sdk_binhost [version...]
# If no versions are specified the current and SDK versions are used.
get_sdk_binhost() {


@@ -1193,45 +1193,46 @@ vm_cleanup() {
}
vm_upload() {
declare -a legacy_uploads
declare -a uploadable_files
declare -a compressed_images
declare -a image_files
declare -a digest_uploads
compress_disk_images VM_GENERATED_FILES compressed_images uploadable_files
if [ "${#compressed_images[@]}" -gt 0 ]; then
uploadable_files+=( "${compressed_images[@]}" )
legacy_uploads+=( "${compressed_images[@]}" )
fi
local digests="$(_dst_dir)/$(_dst_name .DIGESTS)"
upload_image -d "${digests}" "${VM_GENERATED_FILES[@]}"
upload_image -d "${digests}" "${uploadable_files[@]}"
[[ -e "${digests}" ]] || return 0
# FIXME(marineam): Temporary alternate name for .DIGESTS
# This used to be derived from the first file listed in
# ${VM_GENERATED_FILES[@]}", usually $VM_DST_IMG or similar.
# Since not everything actually uploads $VM_DST_IMG this was not very
# consistent and relying on ordering was breakable.
# Now the common prefix, output by $(_dst_name) is used above.
# Some download/install scripts may still refer to the old name.
local uploaded legacy_uploaded
for uploaded in "${VM_GENERATED_FILES[@]}"; do
if [[ "${uploaded}" == "${VM_DST_IMG}" ]]; then
legacy_uploaded="$(_dst_dir)/$(basename ${VM_DST_IMG})"
break
# Since depending on the ordering of $VM_GENERATED_FILES is brittle only
# use it if $VM_DST_IMG isn't included in the uploaded files.
if [ "${#legacy_uploads[@]}" -eq 0 ];then
legacy_uploads+=( "${VM_GENERATED_FILES[0]}" )
fi
for legacy_upload in "${legacy_uploads[@]}";do
local legacy_digest_file="${legacy_upload}.DIGESTS"
[[ "${legacy_digest_file}" == "${digests}" ]] && continue
cp "${digests}" "${legacy_digest_file}"
digest_uploads+=( "${legacy_digest_file}" )
if [[ -e "${digests}.asc" ]]; then
digest_uploads+=( "${legacy_digest_file}.asc" )
cp "${digests}.asc" "${legacy_digest_file}.asc"
fi
done
# Since depending on the ordering of $VM_GENERATED_FILES is brittle only
# use it if $VM_DST_IMG isn't included in the uploaded files.
if [[ -z "${legacy_uploaded}" ]]; then
legacy_uploaded="${VM_GENERATED_FILES[0]}"
fi
# If upload_images compressed $legacy_uploaded be sure to add .bz2
if [[ "${legacy_uploaded}" =~ \.(img|bin|vdi|vhd|vmdk)$ ]]; then
legacy_uploaded+="${IMAGE_ZIPEXT}"
fi
local legacy_digests="${legacy_uploaded}.DIGESTS"
[[ "${legacy_digests}" != "${digests}" ]] || return 0
local legacy_uploads=( "${legacy_digests}" )
cp "${digests}" "${legacy_digests}"
if [[ -e "${digests}.asc" ]]; then
legacy_uploads+=( "${legacy_digests}.asc" )
cp "${digests}.asc" "${legacy_digests}.asc"
if [ "${#digest_uploads[@]}" -gt 0 ];then
legacy_uploads+=( "${digest_uploads[@]}" )
fi
local def_upload_path="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"


@@ -173,7 +173,7 @@ fi
# Build cros_workon packages when they are changed.
CROS_WORKON_PKGS=()
if [ "${FLAGS_workon}" -eq "${FLAGS_TRUE}" ]; then
CROS_WORKON_PKGS+=( $(cros_workon list --board=${FLAGS_board}) )
CROS_WORKON_PKGS+=( $("${SRC_ROOT}/scripts/cros_workon" list --board=${FLAGS_board}) )
fi
if [[ ${#CROS_WORKON_PKGS[@]} -gt 0 ]]; then

build_sdk_container_image Executable file (+251)

@@ -0,0 +1,251 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script will generate an SDK container image from an SDK tarball
# (which in turn is generated by bootstrap_sdk[_container]).
#
# It uses a number of intermediate build steps:
# 1. Import the SDK tarball into a "tarball" container image.
# 2. Build a "plain" SDK container image which creates the "sdk" user
# and runs "update_chroot" to initialise the x86 and aarch64 SDK cross env.
# This step uses sdk_lib/Dockerfile.sdk-import.
# 3. Using the "plain" SDK image, start a temporary "toolchain" container
# to build toolchain binary packages.
# 4. Inheriting from the "plain" SDK image and using the toolchain packages,
# build a "full" SDK container image, with board support for both amd64-usr and arm64-usr.
# A temporary HTTP server on the Docker interface IP is spun up in this step
# to serve the toolchain binpkgs to the build container.
# This step uses sdk_lib/Dockerfile.sdk-build.
# 5. In the final step, all unnecessary binaries are removed from the "full" image and 3
# output SDK images are produced:
# - "all", with both amd64 and arm64 target support, and
# - "amd64" and "arm64, with only amd64 or arm64 target support respectively.
# This step uses sdk_lib/Dockerfile.lean-arch.
set -eu
cd "$(dirname "$0")"
source sdk_lib/sdk_container_common.sh
arch="amd64"
official="0"
tarball=""
os_version=""
keep="false"
cleanup=""
usage() {
echo " $0 - Create SDK container image from an SDK tarball"
echo " This script will set up a new SDK container from a tarball."
echo " The resulting container comes in 3 flavours:"
echo " 1. flatcar-sdk-all - includes both ARM64 and AMD64 support"
echo " 2.+3. flatcar-sdk-(amd64|arm64) - only includes support for one target."
echo " Usage:"
echo " $0 <tarball>] [-k] [-v <os-version>] [-x <script>]"
echo
echo " <tarball> - Local tarball to build SDK from."
echo " The tarball must follow the format"
echo " flatcar-sdk-(amd64|arm64)-<version>.tar.bz2."
echo " -v <version> - Use custom OS version (defaults to tarball's SDK version)."
echo " -k - Keep intermediate build containers (sdk-import / sdk-tarball)."
echo " (Useful for sdk container build script development.)"
echo " -x <script> - For each resource generated during build (container etc.)"
echo " add a cleanup line to <script> which, when run, will free"
echo " the resource. Useful for CI."
echo " -h - Print this help."
echo
}
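# A hypothetical invocation (the tarball name follows the pattern required above):
#   ./build_sdk_container_image flatcar-sdk-amd64-3027.0.0.tar.bz2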
# --
while [ 0 -lt $# ] ; do
case "$1" in
-h) usage; exit 0;;
-k) keep="true"; shift;;
-v) os_version="$2"; shift; shift;;
-x) cleanup="$2"; shift; shift;;
*) if [ -z "$tarball" ] ; then
tarball="$1"; shift
else
echo "ERROR: spurious positional parameter '$@'."
usage
exit 1
fi;;
esac
done
if [ -z "$tarball" -o ! -s "$tarball" ] ; then
echo "ERROR: missing / invalid SDK tarball argument"
exit 1
fi
# --
# Grok version / arch from tarball name, set official if version is a release version
version="$(echo "$tarball" | sed -n 's/.*flatcar-sdk-\(arm64\|amd64\)-\(.\+\)\.tar\.bz2/\2/p')"
arch="$(echo "$tarball" | sed -n 's/.*flatcar-sdk-\(arm64\|amd64\)-.*\.tar\.bz2/\1/p')"
if [ -z "$version" -o -z "$arch" ]; then
echo "ERROR: Unable to determine version / arch from '$tarball'"
exit 1
fi
if [ -z "${os_version}" ] ; then
os_version="${version}"
fi
if is_official "$version" && [ "${version}" = "${os_version}" ] ; then
official="1"
else
official="0"
fi
# --
# import tarball
#
yell "\n######\n###### Building SDK container for version $version from '$tarball'"
create_versionfile "$version" "${os_version}"
docker_vernum="$(vernum_to_docker_image_version "${version}")"
import_tarball="flatcar-sdk-tarball:${docker_vernum}"
image_present="$($docker image ls "$import_tarball" --format '{{.Repository}}:{{.Tag}}')"
if [ "${image_present}" = "${import_tarball}" ] ; then
yell "Using existing SDK tarball image '${import_tarball}'"
else
yell "Importing SDK tarball"
if [ -n "$cleanup" ] ; then
echo "$docker image rm -f '${import_tarball}'" >> "$cleanup"
fi
$docker import "${tarball}" "${import_tarball}"
fi
# --
# build plain SDK container w/o board support
#
import_image="flatcar-sdk-import:${docker_vernum}"
image_present="$($docker image ls "${import_image}" --format '{{.Repository}}:{{.Tag}}')"
if [ "$image_present" = "${import_image}" ] ; then
yell "Using existing SDK import image '${import_image}'"
else
yell "Building plain SDK import image"
if [ -n "$cleanup" ] ; then
echo "$docker image rm -f '${import_image}'" >> "$cleanup"
fi
$docker build -t "$import_image" \
--build-arg VERSION="${docker_vernum}" \
-f sdk_lib/Dockerfile.sdk-import \
.
fi
# --
# build full SDK container w/ board support.
# This uses the SDK import container to first build toolchain binpkgs.
# Then, the import container and toolchain packages are used
# to build a full SDK container w/ amd64 and arm64 board support.
#
sdk_build_image="flatcar-sdk-build:${docker_vernum}"
image_present="$($docker image ls "${sdk_build_image}" --format '{{.Repository}}:{{.Tag}}')"
if [ "$image_present" = "${sdk_build_image}" ] ; then
yell "Using existing SDK build image '${sdk_build_image}'"
else
# --- Toolchains build ---
yell "Building toolchains in a temporary container."
# We need to use run_sdk_container instead of building from a Dockerfile
# since toolchains build uses catalyst which requires privileged access.
tarball_copied=""
if [ "$(basename "${tarball}")" != "${tarball}" ] ; then
cp --reflink=auto "${tarball}" ./
tarball="$(basename "${tarball}")"
tarball_copied="${tarball}"
fi
toolchains_container="flatcar-sdk-toolchains-build-${docker_vernum}"
if [ -n "$cleanup" ] ; then
echo "$docker container rm -f '${toolchains_container}'" >> "$cleanup"
fi
./run_sdk_container -C "${import_image}" -n "${toolchains_container}" \
sudo ./build_toolchains --seed_tarball="./${tarball}"
# remove sdk tarball from scripts root so it's not part of the SDK container build context
if [ -f "${tarball_copied}" ] ; then
rm "${tarball_copied}"
fi
$docker container rm -f "${toolchains_container}"
docker_interface="docker0"
if "${is_podman}"; then
# Make a dummy run without "--net host" here for the interface to be created
$docker run --rm alpine
docker_interface="cni-podman0"
fi
host_ip="$(ip addr show "${docker_interface}" | grep -Po 'inet \K[\d.]+')"
binhost_port="$((1000 + (RANDOM % 55000) ))"
binhost="${host_ip}:${binhost_port}"
binhost_container="${toolchains_container}-binhost-${binhost_port}"
yell "Building SDK container + board support, toolchain packages served at http://${binhost} by ${binhost_container}"
# Spin up temporary toolchains package binhost
if [ -n "$cleanup" ] ; then
echo "$docker container rm -f '${binhost_container}'" >> "$cleanup"
fi
$docker run --rm -d -p "${binhost}":80 \
--name ${binhost_container} \
-v "$(pwd)/__build__/images/catalyst/packages/coreos-toolchains/target":/usr/share/caddy \
docker.io/library/caddy caddy file-server \
--root /usr/share/caddy --browse
# --- Full SDK container build ---
yell "Initialising the SDK container and building board packages"
if [ -n "$cleanup" ] ; then
echo "$docker image rm -f '${sdk_build_image}'" >> "$cleanup"
fi
$docker build -t "${sdk_build_image}" \
--build-arg VERSION="${docker_vernum}" \
--build-arg BINHOST="http://${binhost}" \
--build-arg OFFICIAL="${official}" \
-f sdk_lib/Dockerfile.sdk-build \
.
$docker stop "${binhost_container}"
fi
# --
# Derive "lean" SDK containers from full build. Main purpose
# of this step is to remove "white-outs", i.e. files which have been
# deleted in the full image but are still present in an intermediate layer.
#
for a in all arm64 amd64; do
yell "Creating '$a' arch SDK image"
rmarch=""; rmcross=""
case $a in
arm64) rmarch="amd64-usr"; rmcross="x86_64-cros-linux-gnu";;
amd64) rmarch="arm64-usr"; rmcross="aarch64-cros-linux-gnu";;
esac
$docker build -t "$sdk_container_common_registry/flatcar-sdk-${a}:${docker_vernum}" \
--build-arg VERSION="${docker_vernum}" \
--build-arg RMARCH="${rmarch}" \
--build-arg RMCROSS="${rmcross}" \
-f sdk_lib/Dockerfile.lean-arch \
.
done
# --
# Cleanup
#
if ! $keep; then
yell "Cleaning up intermediate containers"
$docker rmi flatcar-sdk-build:"${docker_vernum}"
$docker rmi flatcar-sdk-import:"${docker_vernum}"
$docker rmi flatcar-sdk-tarball:"${docker_vernum}"
fi

checkout Executable file (+178)

@@ -0,0 +1,178 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -eu
force_tag="false"
force_branch="false"
update_strategy="fast-forward"
name=""
usage() {
echo " Usage:"
echo " $0 [-t|-b] [-n] [-u <strategy>] <tag-or-branch>"
echo " Check our a tag or branch and synchronise git submodules"
echo " coreos-overlay and portage-stable."
echo " By default, $0 tries to auto-detect whether to handle a tag or a branch,"
echo " with tags being prioritised over branches of the same name."
echo
echo " <tag-or-branch> Name of the tag or branch to check out."
echo " BRANCH: Check out the branch in scripts, coreos-overlay"
echo " and portage-stable, and fast-forward to the"
echo " latest changes by default (but see '-u' below)."
echo " TAG: Check out the tag in scripts, and update submodules."
echo
echo " -t force TAG mode. Branches of the given name are ignored."
echo " Mutually exclusive with '-b'."
echo " -b force BRANCH mode. Tags of the given name are ignored."
echo " Mutually exclusive with '-t'."
echo
echo " -u <strategy> Only applies to BRANCH checkouts."
echo " When checking out submodule branches, use <strategy>."
echo " instead of fast-forward. Strategy is one of:"
echo " 'fast-forward' - fast-forward to upstream branch tip. Default."
echo " 'rebase' - rebase local changes on upstream changes."
echo " Useful for keepling local changes in submodules."
echo " 'omit' - check out branches, but do not update."
echo " Defaults to '$update_strategy'"
echo
echo " -h Print this help."
echo
}
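# Hypothetical invocations (tag / branch names are illustrative):
#   ./checkout alpha-3227.0.0      # auto-detect; a tag wins over a branch of the same name
#   ./checkout -b -u rebase main   # force BRANCH mode, rebase local submodule changes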
# --
while [ 0 -lt $# ] ; do
case "$1" in
-h) usage; exit 0;;
-t) force_tag="true"; shift;;
-b) force_branch="true"; shift;;
-u) update_strategy="$2"; shift; shift;;
*) if [ -n "$name" ] ; then
echo
echo "ERROR: only ONE tag-or-branch can be specified."
echo
usage
exit 1
fi
name="$1"; shift;;
esac
done
if [ -z "$name" ] ; then
usage
exit 0
fi
if $force_branch && $force_tag; then
echo
echo "ERROR: '-t' and '-b' are mutually exclusive. Please make up your mind."
echo
usage
exit 1
fi
case "$update_strategy" in
fast-forward) update_strategy="--ff-only";;
rebase) update_strategy="--rebase";;
omit) update_strategy="";;
*) echo
echo "ERROR: unsupported branch update strategy '$update_strategy'."
echo
usage
exit 1;;
esac
# --
# make sure submodules are initialised
git submodule init
for dir in sdk_container/src/third_party/coreos-overlay \
sdk_container/src/third_party/portage-stable ; do
if [ ! -f "$dir"/.git ] ; then
git submodule update -N "$dir"
fi
done
function check_all() {
local gitcmd="$1"
local name="$2"
local scripts="$(git $gitcmd \
| sed -e 's/^[[:space:]]*//' -e 's:remotes/[^/]\+/::' \
| grep -m1 -E "^$name\$")"
# tag has submodules pinned, no need to check
if [[ "${gitcmd}" =~ ^tag\ .* ]] ; then
echo "${scripts}"
return
fi
local overlay="$(git -C sdk_container/src/third_party/coreos-overlay $gitcmd \
| sed -e 's/^[[:space:]]*//' -e 's:remotes/[^/]\+/::' \
| grep -m1 -E "^$name\$")"
local portage="$(git -C sdk_container/src/third_party/portage-stable $gitcmd \
| sed -e 's/^[[:space:]]*//' -e 's:remotes/[^/]\+/::' \
| grep -m1 -E "^$name\$")"
if [ -n "$scripts" -a -n "$overlay" -a -n "$portage" ] ; then
echo "$scripts"
fi
}
# --
#
# TAG
#
if ! $force_branch; then
for dir in . \
sdk_container/src/third_party/coreos-overlay \
sdk_container/src/third_party/portage-stable ; do
git -C "$dir" fetch --tags --force --prune --prune-tags
done
tag="$(check_all 'tag -l' "$name")"
if [ -n "$tag" ] ; then
echo
echo "Checking out TAG '$tag'"
echo "----------------------------------"
git checkout "$tag"
git submodule update
exit
fi
fi
echo "No tag by name '$name' in repo + submodules."
if $force_tag; then
echo "Tag-only mode forced, exiting."
exit 1
fi
#
# BRANCH
#
branch="$(check_all "branch -a -l" "$name")"
if [ -z "$branch" ]; then
echo "No branch by name '$name' in repo + submodules."
exit 1
fi
echo
echo "Checking out BRANCH '$branch'"
echo "----------------------------------"
for dir in . \
sdk_container/src/third_party/coreos-overlay \
sdk_container/src/third_party/portage-stable ; do
git -C "$dir" checkout "$branch"
if [ -n "$update_strategy" ] ; then
echo "updating branch in '$dir' /'$update_strategy')"
git -C "$dir" pull "$update_strategy"
fi
done

ci-automation/README.md Normal file (+165)

@@ -0,0 +1,165 @@
# CI automation glue logic scripts
Scripts in this directory aim to ease automation of Flatcar builds in continuous integration systems.
The design goal of the automation scripts is to provide self-contained, context-aware automation with low integration overhead.
Each step takes its context from the repository (version to build etc.) and from the artifact of the previous build, with the aim of reducing the number of arguments to an absolute minimum.
Each script represents a distinct build step; each step ingests the container image of the previous step and produces a new container image for the next step.
Notable exceptions are "SDK Bootstrap" (`sdk.sh`) which only creates an SDK tarball, and "VMs build" which does not output a container but only VM (vendor) images.
The container images are self-contained and aim for ease of reproducibility.
All steps make use of a "build cache" server for pulling (https) build inputs and for pushing (rsync) artifacts.
Test automation is provided alongside build automation, following the same design principles.
Please refer to the individual scripts for prerequisites, input parameters, and outputs.
## Build steps
The build pipeline can be used to build everything from scratch, including the SDK (starting from 1. below) or to build a new OS image (starting from 3.).
"From scratch" builds (i.e. builds which include a new SDK) are usually only done for the `main` branch (`main` can be considered `alpha-next`).
Release / maintenance branches in the majority of cases do not build a new SDK but start with the OS image build.
Release branches usually use the SDK introduced when the new major version was branched off `main` throughout the lifetime of the major version; i.e. release `stable-MMMM.mm.pp` would use `SDK-MMMM.0.0`.
To reproduce any given build step, follow this pattern:
```
./checkout <build-tag> # Build tag from either the SDK bootstrap or the Packages step
source ci-automation/<step-script>.sh
<step_function> <parameters>
```
For example, to rebuild the AMD64 OS image of build `main-3145.0.0-nightly-20220209-0139`, do
```
./checkout main-3145.0.0-nightly-20220209-0139
source ci-automation/image.sh
image_build amd64
```
### SDK bootstrap build
1. SDK Bootstrap (`sdk.sh`): Use a seed SDK tarball and seed SDK container image to build a new SDK tarball.
The resulting SDK tarball will use packages and versions pinned in the coreos-overlay and portage-stable submodules.
This step updates the versionfile, recording the SDK container version just built.
It will generate and push a new version tag to the scripts repo.
2. SDK container build (`sdk_container.sh`) : use SDK tarball to build an SDK container image.
The resulting image will come in "amd64", "arm64", and "all" flavours, with support for respective OS target architectures. This step builds the Flatcar SDK container images published at ghcr.io/flatcar.
```
.---------. .------------. .--------.
| scripts | | CI | | Build |
| repo | | automation | | cache |
`---------´ `------------´ `--------´
| | |
| "alpha-3449.0.0-dev23" |
| | |
| _______v_______ |
+-------- clone -------> ( SDK bootstrap ) |
| `-------------´ |
|<- tag: alpha-3499.0.0-dev23 --´|`--- sdk tarball --->|
| | |
| _______v_______ |
+-------- clone -------> ( SDK container ) |
| alpha-3499.0.0-dev23 `-------------´ |
| |`- sdk container --->|
v v image
continue to OS
image build
|
v
```
### OS image build
3. Packages tag (`packages-tag.sh`): Creates a git tag, if needed, with the SDK container version recorded in the versionfile and the OS version derived from the tag.
Creates a `skip-build` flag file if there are no changes since the last nightly tag.
Pushes the new version tag to the scripts repo as a free-standing tag, and for nightlies also to the branch.
4. Packages build (`packages.sh`): Build OS image packages and generate a new container image (containing both SDK and packages).
5. Packages are published and the generic OS image is built.
1. Binary packages are published (`push_pkgs.sh`) to the build cache, making them available to developers who base their work on the main branch.
2. Image build (`image.sh`): Using the container from the packages build (4.), build an OS image and torcx store, and generate a new container image with everything in it.
6. VMs build (`vms.sh`). Using the packages+torcx+image container from the image build (5.), build vendor images. Results are vendor-specific OS images.
```
.---------. .------------. .--------.
| scripts | | CI | | Build |
| repo | | automation | | cache |
`---------´ `------------´ `--------´
| | |
| "alpha-3449.0.0-dev23" |
| | |
| ______v_______ |
+---------- clone ------> ( packages-tag ) |
| `------------´ |
|<-- tag: alpha-3499.0.0-dev23 --´| |
| ____v_____ |
+----- clone ---> ( packages ) |
| alpha-3499.0.0-dev23 `--------´ |
| |`- sdk + OS packages -->|
| | container image |
| | torcx manifest |
| ______v_______ |
| ( publish pkgs ) |
| `------------´ |
| |`-- binary packages --->|
| ___v__ |
+----- clone ---> ( image ) |
| alpha-3499.0.0-dev23 `-----´ |
| |`-- sdk + packages + -->|
| __v__ OS image cnt img |
+----- clone ---> ( vms ) |
alpha-3499.0.0-dev23 `---´ |
`- vendor OS images ---->|
```
## Testing
Testing follows the same design principles build automation adheres to - it's self-contained and context-aware, reducing required parameters to a minimum.
The `test.sh` script needs exactly two parameters: the architecture, and the image type to be tested.
Optionally, patterns matching a group of tests can be supplied (or simply a list of tests); this defaults to "all tests" of a given vendor / image.
`test.sh` also supports re-running failed tests automatically to reduce the need for human interaction on flaky tests.
Testing is implemented in two layers:
1. `ci-automation/test.sh` is a generic test wrapper / stub to be called from CI.
2. `ci-automation/vendor-testing/` contains low-level vendor-specific test wrappers around [`kola`](https://github.com/flatcar/mantle/tree/flatcar-master/kola/), our test scenario orchestrator.
Testing relies on the SDK container and will use tools / test suites from the SDK.
The low-level vendor / image specific script (layer 2. in the list above) runs inside the SDK.
Testing will use the vendor image published by `vms.sh` from buildcache, and the torcx manifest published by `packages.sh`.
Additionally, a script library is provided (at `ci-automation/tapfile_helper_lib.sh`) to help handle `.tap` test result files produced by test runs.
Library functions may be used to merge the result of multiple test runs (e.g. for multiple image types / vendors) into a single test result report.
The test runs are considered successful only if all tests succeeded for all vendors / images at least once.
**Usage**
```
./checkout <version-to-test>
source ci-automation/test.sh
test_run <arch> <image-type>
```
E.g. for running qemu / amd64 tests on `main-3145.0.0-nightly-20220209-0139`:
```
./checkout main-3145.0.0-nightly-20220209-0139
source ci-automation/test.sh
test_run amd64 qemu
```
### QEmu test
`ci-automation/vendor-testing/qemu.sh` implements a `kola` wrapper for testing the `qemu` image.
The wrapper is a straightforward call to `kola` and does not have any additional requirements.
**NOTE** that the generic image (`flatcar_production_image.bin`) is used for the test instead of the QEmu vendor image.
**NOTE on host firewalling** The test automation uses bridged networking and will handle forwarding and NAT.
However, we experienced test failures from lack of internet access with several firewall implementations.
It is recommended to stop firewalling on the host the tests are run on (for example, use `systemctl stop firewalld` if the host uses `firewalld`).
**Settings**
* `QEMU_IMAGE_NAME` - file name of the QEmu image to fetch from bincache.
* `QEMU_PARALLEL` - Number of parallel test cases to run.
Note that test cases may involve launching multiple QEmu VMs (network testing etc.).
Tests are memory bound, not CPU bound; e.g. `20` is a sensible value for a 6-core / 12-thread system with 32 GB RAM.
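Both settings are evaluated inside the SDK container from `ci-automation/ci-config.env`, so the CI must pass overrides in via `sdk_container/.env`; a minimal sketch using the override mechanism described in `ci-config.env`:
```
# run fewer parallel test cases on a memory-constrained CI host
echo "export PARALLEL_TESTS=\"5\"" > sdk_container/.env
```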

ci-automation/ci-config.env Normal file (+131)

@@ -0,0 +1,131 @@
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Flatcar CI static configuration
# Build cache server for build artifacts.
# Required services:
# - http and https (WITHOUT auto-redirect)
# - ssh for BUILDCACHE_USER
BUILDCACHE_SERVER="${BUILDCACHE_SERVER:-bincache.flatcar-linux.net}"
BUILDCACHE_PATH_PREFIX="/srv/bincache"
BUILDCACHE_USER="bincache"
RELEASES_SERVER="mirror.release.flatcar-linux.net"
CONTAINER_REGISTRY="ghcr.io/flatcar"
GC_BUCKET="flatcar-linux"
DEFAULT_HTTP_IMAGE_URL_TEMPLATE="@PROTO@://${BUILDCACHE_SERVER}/images/@ARCH@/@VERNUM@"
if ! command -v pigz > /dev/null; then
# No PIGZ on Flatcar
PIGZ="docker run --rm -i ghcr.io/flatcar/pigz --fast"
fi
CI_GIT_AUTHOR="flatcar-ci"
CI_GIT_EMAIL="infra+ci@flatcar-linux.org"
# build artifacts go here (in container)
CONTAINER_TORCX_ROOT="/home/sdk/build/torcx"
CONTAINER_IMAGE_ROOT="/home/sdk/build/images"
#
# Image / vendor tests settings
#
# NOTE that these settings are evaluated by the vendor-tests script inside the
# SDK container. To override, new values must be passed into the container.
# Use something like
# echo "export [VAR]=\"${[VALUE]}\\"" > sdk_container/.env
# in your CI to override, e.g.
# echo "export PARALLEL_TESTS=\"5\"" > sdk_container/.env
# to override the number of test cases to be run in parallel.
# -- QEMU --
QEMU_IMAGE_NAME=${QEMU_IMAGE_NAME:-flatcar_production_image.bin}
QEMU_PARALLEL="${PARALLEL_TESTS:-20}"
# BIOS path within the SDK
QEMU_BIOS="/usr/share/qemu/bios-256k.bin"
# UEFI bios filename on build cache.
# Published by vms.sh as part of the qemu vendor build.
QEMU_UEFI_BIOS="${QEMU_UEFI_BIOS:-flatcar_production_qemu_uefi_efi_code.fd}"
# -- Equinix Metal --
EQUINIXMETAL_PARALLEL="${PARALLEL_TESTS:-4}"
# Metro is a set of Equinix Metal regions
EQUINIXMETAL_amd64_METRO="${EQUINIXMETAL_amd64_METRO:-SV}"
EQUINIXMETAL_arm64_METRO="${EQUINIXMETAL_arm64_METRO:-DC}"
# Name of the Equinix Metal image
EQUINIXMETAL_IMAGE_NAME="flatcar_production_packet_image.bin.bz2"
# Storage URL required to store user-data
EQUINIXMETAL_STORAGE_URL="${EQUINIXMETAL_STORAGE_URL:-gs://flatcar-jenkins/mantle/packet}"
# Equinix Metal default AMD64 instance type
EQUINIXMETAL_amd64_INSTANCE_TYPE="${EQUINIXMETAL_amd64_INSTANCE_TYPE:-c3.small.x86}"
# Space separated list of instance types. On those instances the
# cl.internet kola test will be run if this test is selected to run.
EQUINIXMETAL_amd64_MORE_INSTANCE_TYPES="m3.small.x86 c3.medium.x86 m3.large.x86 s3.xlarge.x86 n2.xlarge.x86"
# Equinix Metal default ARM64 instance type
EQUINIXMETAL_arm64_INSTANCE_TYPE="c3.large.arm"
# Space separated list of instance types. On those instances the
# cl.internet kola test will be run if this test is selected to run.
EQUINIXMETAL_arm64_MORE_INSTANCE_TYPES=""
# -- PXE --
PXE_KERNEL_NAME="flatcar_production_pxe.vmlinuz"
PXE_IMAGE_NAME="flatcar_production_pxe_image.cpio.gz"
GCE_IMAGE_NAME="flatcar_production_gce.tar.gz"
GCE_GCS_IMAGE_UPLOAD="gs://flatcar-jenkins/developer/gce-ci"
GCE_MACHINE_TYPE="${GCE_MACHINE_TYPE:-n1-standard-2}"
GCE_PARALLEL="${PARALLEL_TESTS:-4}"
# -- Digital Ocean --
# Use the "@PROTO@" "@ARCH@", "@CHANNEL@" and "@VERNUM@" placeholders. They will
# be replaced.
: ${DIGITALOCEAN_IMAGE_URL_TEMPLATE:="${DEFAULT_HTTP_IMAGE_URL_TEMPLATE}/flatcar_production_digitalocean_image.bin.bz2"}
: ${DIGITALOCEAN_REGION:='sfo3'}
: ${DIGITALOCEAN_MACHINE_SIZE:='s-2vcpu-2gb'}
DIGITALOCEAN_PARALLEL="${PARALLEL_TESTS:-8}"
# DIGITALOCEAN_TOKEN_JSON env var is used for credentials, and should
# come from sdk_container/.env. It must be base64-encoded.
# -- VMware ESX --
: ${VMWARE_ESX_IMAGE_NAME:='flatcar_production_vmware_ova.ova'}
VMWARE_ESX_PARALLEL="${PARALLEL_TESTS:-4}"
# VMWARE_ESX_CREDS should come from sdk_container/.env and must be
# base64-encoded.
# -- AWS --
: ${AWS_amd64_INSTANCE_TYPE:="t3.small"}
# Space separated list of instance types. On those instances the
# cl.internet kola test will be run if this test is selected to run.
: ${AWS_amd64_MORE_INSTANCE_TYPES:="m4.2xlarge"}
: ${AWS_arm64_INSTANCE_TYPE:="a1.large"}
# Space separated list of instance types. On those instances the
# cl.internet kola test will be run if this test is selected to run.
: ${AWS_arm64_MORE_INSTANCE_TYPES:=""}
: ${AWS_IAM_PROFILE:="ciauto-test"}
: ${AWS_REGION:="us-east-1"}
: ${AWS_AMI_ID:=""}
AWS_PARALLEL="${PARALLEL_TESTS:-8}"
# AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY should come from
# sdk_container/.env
# -- Azure --
: ${AZURE_IMAGE_NAME:="flatcar_production_azure_image.vhd"}
: ${AZURE_amd64_MACHINE_SIZE:="Standard_D2s_v4"}
: ${AZURE_arm64_MACHINE_SIZE:="Standard_D2pls_v5"}
: ${AZURE_USE_GALLERY:=""}
: ${AZURE_USE_PRIVATE_IPS:=true}
: ${AZURE_VNET_SUBNET_NAME:="jenkins-vnet-westeurope"}
AZURE_PARALLEL="${PARALLEL_TESTS:-20}"
AZURE_LOCATION="${AZURE_LOCATION:-westeurope}"


@@ -0,0 +1,456 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# CI automation common functions.
source ci-automation/ci-config.env
: ${PIGZ:=pigz}
: ${docker:=docker}
: ${TEST_WORK_DIR:='__TESTS__'}
function init_submodules() {
git submodule init
git submodule update
}
# --
function update_submodule() {
local submodule="$1"
local commit_ish="$2"
cd "sdk_container/src/third_party/${submodule}"
git fetch --all --tags
git checkout "${commit_ish}"
cd -
}
# --
function check_version_string() {
local version="$1"
if [[ ! "${version}" =~ ^(main|alpha|beta|stable|lts)-[0-9]+\.[0-9]+\.[0-9]+(-.+)?$ ]]; then
echo "ERROR: invalid version '${version}', must start with 'main', 'alpha', 'beta', 'stable' or 'lts', followed by a dash and three dot-separated numbers, optionally followed by a dash and a non-empty build ID"
exit 1
fi
}
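# Examples matching the pattern above (version strings are illustrative):
#   check_version_string "alpha-3227.0.0"                       # accepted
#   check_version_string "main-3227.0.0-nightly-20220705-0139"  # accepted (build ID suffix)
#   check_version_string "3227.0.0"                             # rejected: channel prefix missing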
# --
function update_submodules() {
local coreos_git="$1"
local portage_git="$2"
init_submodules
update_submodule "coreos-overlay" "${coreos_git}"
update_submodule "portage-stable" "${portage_git}"
}
# --
function update_and_push_version() {
local version="$1"
local push_to_branch="${2:-false}"
# set up author and email so git does not complain when tagging
if ! git config --get user.name >/dev/null 2>&1 ; then
git -C . config user.name "${CI_GIT_AUTHOR}"
fi
if ! git config --get user.email >/dev/null 2>&1 ; then
git -C . config user.email "${CI_GIT_EMAIL}"
fi
# Add and commit local changes
git add "sdk_container/src/third_party/coreos-overlay"
git add "sdk_container/src/third_party/portage-stable"
git add "sdk_container/.repo/manifests/version.txt"
git commit --allow-empty -m "New version: ${version}"
git fetch --all --tags --force
local ret=0
git diff --exit-code "${version}" || ret=$?
# This will return != 0 if
# - the remote tag does not exist (rc: 127)
# - the tag does not exist locally (rc: 128)
# - the remote tag has changes compared to the local tree (rc: 1)
if [ "$ret" = "0" ]; then
echo "Reusing existing tag" >&2
git checkout -f --recurse-submodules "${version}"
return
elif [ "$ret" = "1" ]; then
echo "Remote tag exists already and is not equal" >&2
return 1
elif [ "$ret" != "127" ] && [ "$ret" != "128" ]; then
echo "Error: Unexpected git diff return code ($ret)" >&2
return 1
fi
local -a TAG_ARGS
if [ "${SIGN-0}" = 1 ]; then
TAG_ARGS=("-s" "-m" "${version}")
fi
git tag -f "${TAG_ARGS[@]}" "${version}"
if [ "${push_to_branch}" = "true" ]; then
local branch="$(git rev-parse --abbrev-ref HEAD)"
git push origin "${branch}"
fi
git push origin "${version}"
}
# --
function copy_from_buildcache() {
local what="$1"
local where_to="$2"
mkdir -p "$where_to"
curl --fail --silent --show-error --location --retry-delay 1 --retry 60 \
--retry-connrefused --retry-max-time 60 --connect-timeout 20 \
--remote-name --output-dir "${where_to}" "https://${BUILDCACHE_SERVER}/${what}"
}
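# Hypothetical example, using the containers/ layout seen elsewhere in this file:
#   copy_from_buildcache "containers/${version}/flatcar-sdk-amd64-${version}.tar.gz" ./downloads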
# --
function gen_sshcmd() {
echo -n "ssh -o BatchMode=yes"
echo -n " -o StrictHostKeyChecking=no"
echo -n " -o UserKnownHostsFile=/dev/null"
echo " -o NumberOfPasswordPrompts=0"
}
# --
function copy_dir_from_buildcache() {
local remote_path="${BUILDCACHE_PATH_PREFIX}/$1"
local local_path="$2"
local sshcmd="$(gen_sshcmd)"
mkdir -p "${local_path}"
rsync --partial -a -e "${sshcmd}" "${BUILDCACHE_USER}@${BUILDCACHE_SERVER}:${remote_path}" \
"${local_path}"
}
# --
function copy_to_buildcache() {
local remote_path="${BUILDCACHE_PATH_PREFIX}/$1"
shift
local sshcmd="$(gen_sshcmd)"
$sshcmd "${BUILDCACHE_USER}@${BUILDCACHE_SERVER}" \
"mkdir -p ${remote_path}"
rsync --partial -a -e "${sshcmd}" "$@" \
"${BUILDCACHE_USER}@${BUILDCACHE_SERVER}:${remote_path}"
}
# --
function image_exists_locally() {
local name="$1"
local version="$2"
local image="${name}:${version}"
local image_exists="$($docker images "${image}" \
--no-trunc --format '{{.Repository}}:{{.Tag}}')"
[ "${image}" = "${image_exists}" ]
}
# --
# Derive docker-safe image version string from vernum.
#
function vernum_to_docker_image_version() {
local vernum="$1"
echo "$vernum" | sed 's/[+]/-/g'
}
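# E.g. a dev version "3227.0.0+nightly-20220705-0139" (illustrative) becomes
# "3227.0.0-nightly-20220705-0139", since '+' is not a valid character in
# Docker image tags.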
# --
# Return the full name (repo+name+tag) of an image. Useful for SDK images
# pulled from the registry (which have the registry pre-pended)
function docker_image_fullname() {
local image="$1"
local version="$2"
$docker images --no-trunc --format '{{.Repository}}:{{.Tag}}' \
| grep -E "^(${CONTAINER_REGISTRY}/)*${image}:${version}$"
}
# --
function docker_image_to_buildcache() {
local image="$1"
local version="$2"
# strip potential container registry prefix
local tarball="$(basename "$image")-${version}.tar.gz"
$docker save "${image}":"${version}" | $PIGZ -c > "${tarball}"
create_digests "${SIGNER:-}" "${tarball}"
sign_artifacts "${SIGNER:-}" "${tarball}"*
copy_to_buildcache "containers/${version}" "${tarball}"*
}
# --
function docker_commit_to_buildcache() {
local container="$1"
local image_name="$2"
local image_version="$3"
$docker commit "${container}" "${image_name}:${image_version}"
docker_image_to_buildcache "${image_name}" "${image_version}"
}
# --
function docker_image_from_buildcache() {
local name="$1"
local version="$2"
local tgz="${name}-${version}.tar.gz"
if image_exists_locally "${name}" "${version}" ; then
return
fi
local url="https://${BUILDCACHE_SERVER}/containers/${version}/${tgz}"
curl --fail --silent --show-error --location --retry-delay 1 --retry 60 \
--retry-connrefused --retry-max-time 60 --connect-timeout 20 \
--remote-name "${url}"
cat "${tgz}" | $PIGZ -d -c | $docker load
rm "${tgz}"
}
# --
function docker_image_from_registry_or_buildcache() {
local image="$1"
local version="$2"
if image_exists_locally "${CONTAINER_REGISTRY}/${image}" "${version}" ; then
return
fi
if $docker pull "${CONTAINER_REGISTRY}/${image}:${version}" ; then
return
fi
echo "Falling back to tar ball download..." >&2
docker_image_from_buildcache "${image}" "${version}"
}
# --
# Called by vendor test in case of complete failure not eligible for
# reruns (like trying to run tests on unsupported architecture).
function break_retest_cycle() {
local work_dir=$(dirname "${PWD}")
local dir=$(basename "${work_dir}")
if [[ "${dir}" != "${TEST_WORK_DIR}" ]]; then
echo "Not breaking retest cycle, expected test work dir to be a parent directory" >&2
return
fi
touch "${work_dir}/break_retests"
}
# --
# Called by test runner to see if the retest cycle should be broken.
function retest_cycle_broken() {
# Using the reverse boolean logic here!
local broken=1
if [[ -f "${TEST_WORK_DIR}/break_retests" ]]; then
broken=0
rm -f "${TEST_WORK_DIR}/break_retests"
fi
return ${broken}
}
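# Sketch of the intended call site in a test runner loop (note the reverse
# boolean logic above: the function *succeeds* when the cycle should stop):
#   if retest_cycle_broken ; then
#       echo "vendor script requested an abort, skipping reruns" >&2
#       break
#   fi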
# --
# Substitutes fields in the passed template and prints the
# result. Followed by the template, the parameters used for
# replacement are in alphabetical order: arch, channel, proto and
# vernum.
function url_from_template() {
local template="${1}"; shift
local arch="${1}"; shift
local channel="${1}"; shift
local proto="${1}"; shift
local vernum="${1}"; shift
local url="${template}"
url="${url//@ARCH@/${arch}}"
url="${url//@CHANNEL@/${channel}}"
url="${url//@PROTO@/${proto}}"
url="${url//@VERNUM@/${vernum}}"
echo "${url}"
}
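# Hypothetical example, using the default template from ci-config.env:
#   url_from_template "${DEFAULT_HTTP_IMAGE_URL_TEMPLATE}" amd64 alpha https 3227.0.0
#   # -> https://bincache.flatcar-linux.net/images/amd64/3227.0.0
#   # (this particular template contains no @CHANNEL@, so the channel is unused)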
# --
# Puts a secret into a file, while trying for the secret to not end up
# on a filesystem at all. A path to the file with the secret in /proc
# is put into the chosen variable. The secret is assumed to be
# base64-encoded.
#
# Typical use:
# secret_file=''
# secret_to_file secret_file "${some_secret}"
#
# Parameters:
# 1 - name of the variable where the path is stored
# 2 - the secret to store in the file
function secret_to_file() {
local config_var_name="${1}"; shift
local secret="${1}"; shift
local tmpfile=$(mktemp)
local -n config_ref="${config_var_name}"
local fd
exec {fd}<>"${tmpfile}"
rm -f "${tmpfile}"
echo "${secret}" | base64 --decode >&${fd}
# Use BASHPID because we may be in a subshell but $$ is only the main shell's PID
config_ref="/proc/${BASHPID}/fd/${fd}"
}
# --
# Creates signatures for the passed files and directories. In case of
# directory, all files inside are signed. Files ending with .asc or
# .sig or .gpg are ignored, though. This function is a noop if signer
# is empty.
#
# Typical use:
# sign_artifacts "${SIGNER}" artifact.tar.gz
# copy_to_buildcache "artifacts/directory" artifact.tar.gz*
#
# Parameters:
#
# 1 - signer whose key is expected to be already imported into the
# keyring
# @ - files and directories to sign
function sign_artifacts() {
local signer="${1}"; shift
# rest of the parameters are directories/files to sign
local to_sign=()
local file
if [[ -z "${signer}" ]]; then
return
fi
list_files to_sign 'asc,gpg,sig' "${@}"
for file in "${to_sign[@]}"; do
gpg --batch --local-user "${signer}" \
--output "${file}.sig" \
--detach-sign "${file}"
done
}
# --
# Creates digests files and armored ASCII files out of them for the
# passed files and directories. In case of directory, all files inside
# it are processed. No new digests file is created if there is one
# already for the processed file. Same for armored ASCII file. Files
# ending with .asc or .sig or .gpg or .DIGESTS are not processed. The
# armored ASCII files won't be created if the signer is empty.
#
# Typical use:
# create_digests "${SIGNER}" artifact.tar.gz
# sign_artifacts "${SIGNER}" artifact.tar.gz*
# copy_to_buildcache "artifacts/directory" artifact.tar.gz*
#
# Parameters:
#
# 1 - signer whose key is expected to be already imported into the
# keyring
# @ - files and directories to create digests for
function create_digests() {
local signer="${1}"; shift
# rest of the parameters are files or directories to create
# digests for
local to_digest=()
local file
local df
local fbn
local hash_type
local output
local af
list_files to_digest 'asc,gpg,sig,DIGESTS' "${@}"
for file in "${to_digest[@]}"; do
df="${file}.DIGESTS"
if [[ ! -e "${df}" ]]; then
touch "${df}"
fbn=$(basename "${file}")
# TODO: modernize - drop md5 and sha1, add b2
for hash_type in md5 sha1 sha512; do
echo "# ${hash_type} HASH" | tr "a-z" "A-Z" >>"${df}"
output=$("${hash_type}sum" "${file}")
echo "${output%% *} ${fbn}" >>"${df}"
done
fi
if [[ -z "${signer}" ]]; then
continue
fi
af="${df}.asc"
if [[ ! -e "${af}" ]]; then
gpg --batch --local-user "${signer}" \
--output "${af}" \
--clearsign "${df}"
fi
done
}
# --
# Puts a filtered list of files from the passed files and directories
# in the passed variable. The filtering is done by ignoring files that
# end with the passed extensions. The extensions list should not
# contain the leading dot.
#
# Typical use:
# local all_files=()
# local ignored_extensions='sh,py,pl' # ignore the shell, python and perl scripts
# list_files all_files "${ignored_extensions}" "${directories_and_files[@]}"
#
# Parameters:
#
# 1 - name of an array variable where the filtered files will be stored
# 2 - comma-separated list of extensions that will be used for filtering files
# @ - files and directories to scan for files
function list_files() {
local files_variable_name="${1}"; shift
local ignored_extensions="${1}"; shift
# rest of the parameters are files or directories to list
local -n files="${files_variable_name}"
local file
local tmp_files
local pattern=''
if [[ -n "${ignored_extensions}" ]]; then
pattern='\.('"${ignored_extensions//,/|}"')$'
fi
files=()
for file; do
tmp_files=()
if [[ -d "${file}" ]]; then
readarray -d '' tmp_files < <(find "${file}" ! -type d -print0)
elif [[ -e "${file}" ]]; then
tmp_files+=( "${file}" )
fi
if [[ -z "${pattern}" ]]; then
files+=( "${tmp_files[@]}" )
continue
fi
for file in "${tmp_files[@]}"; do
if [[ "${file}" =~ ${pattern} ]]; then
continue
fi
files+=( "${file}" )
done
done
}
# --


@@ -0,0 +1,156 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# garbage_collect() should be called after sourcing.
#
# OPTIONAL INPUT
# - Number of (recent) versions to keep. Defaults to 50.
# - PURGE_VERSIONS (Env variable). Space-separated list of versions to purge
# instead of all but the 50 most recent ones.
# - DRY_RUN (Env variable). Set to "y" to just list what would be done but not
# actually purge anything.
# Flatcar CI automation garbage collector.
# This script removes development (non-official) build artifacts:
# - SDK tarballs, build step containers, and vendor images on buildcache
# - tags from the scripts repository
#
# Garbage collection is based on development (non-official) version tags
# in the scripts repo. The newest 50 builds will be retained,
# all older builds will be purged (50 is the default, see OPTIONAL INPUT above).
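# Hypothetical use (the file name is an assumption):
#   source ci-automation/garbage_collect.sh
#   DRY_RUN=y garbage_collect        # only list what would be purged
#   garbage_collect 100              # keep the 100 newest dev builds, purge the rest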
function garbage_collect() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_garbage_collect_impl "${@}"
)
}
# --
function _garbage_collect_impl() {
local keep="${1:-50}"
local dry_run="${DRY_RUN:-}"
local purge_versions="${PURGE_VERSIONS:-}"
local versions_detected="$(git tag -l --sort=-committerdate \
| grep -E '(main|alpha|beta|stable|lts)-[0-9]+\.[0-9]+\.[0-9]+\-.*' \
| grep -vE '(-pro)$')"
echo "######## Full list of version(s) found ########"
echo "${versions_detected}" | awk '{printf "%5d %s\n", NR, $0}'
if [ -z "${purge_versions}" ] ; then
keep="$((keep + 1))" # for tail -n+...
purge_versions="$(echo "${versions_detected}" \
| tail -n+"${keep}")"
else
# make sure we only accept dev versions
purge_versions="$(echo "${purge_versions}" | sed 's/ /\n/g' \
| grep -E '(main|alpha|beta|stable|lts)-[0-9]+\.[0-9]+\.[0-9]+\-.*' \
| grep -vE '(-pro)$')"
fi
source ci-automation/ci_automation_common.sh
local sshcmd="$(gen_sshcmd)"
echo
echo "######## The following version(s) will be purged ########"
if [ "$dry_run" = "y" ] ; then
echo
echo "(NOTE this is just a dry run since DRY_RUN=y)"
echo
fi
echo "${purge_versions}" | awk -v keep="${keep}" '{if ($0 == "") next; printf "%5d %s\n", NR + keep - 1, $0}'
echo
echo
local version
for version in ${purge_versions}; do
echo "--------------------------------------------"
echo
echo "#### Processing version '${version}' ####"
echo
git checkout "${version}" -- sdk_container/.repo/manifests/version.txt
source sdk_container/.repo/manifests/version.txt
# Assuming that the SDK build version also has the same OS version
local os_vernum="${FLATCAR_VERSION}"
local os_docker_vernum="$(vernum_to_docker_image_version "${FLATCAR_VERSION}")"
# Remove container image tarballs and SDK tarball (if applicable)
#
local rmpat=""
rmpat="${BUILDCACHE_PATH_PREFIX}/sdk/*/${os_vernum}/"
rmpat="${rmpat} ${BUILDCACHE_PATH_PREFIX}/containers/${os_docker_vernum}/flatcar-sdk-*"
rmpat="${rmpat} ${BUILDCACHE_PATH_PREFIX}/containers/${os_docker_vernum}/flatcar-packages-*"
rmpat="${rmpat} ${BUILDCACHE_PATH_PREFIX}/boards/*/${os_vernum}/"
rmpat="${rmpat} ${BUILDCACHE_PATH_PREFIX}/containers/${os_docker_vernum}/flatcar-images-*"
rmpat="${rmpat} ${BUILDCACHE_PATH_PREFIX}/images/*/${os_vernum}/"
rmpat="${rmpat} ${BUILDCACHE_PATH_PREFIX}/testing/${os_vernum}/"
echo "## The following files will be removed ##"
$sshcmd "${BUILDCACHE_USER}@${BUILDCACHE_SERVER}" \
"ls -la ${rmpat} || true"
if [ "$dry_run" != "y" ] ; then
set -x
$sshcmd "${BUILDCACHE_USER}@${BUILDCACHE_SERVER}" \
"rm -rf ${rmpat}"
set +x
else
echo "## (DRY_RUN=y so not doing anything) ##"
fi
# Remove container image directory if empty
#
rmpat="${BUILDCACHE_PATH_PREFIX}/containers/${os_docker_vernum}/"
echo "## Checking if container directory is empty and can be removed (it's OK if this fails) ##"
echo "## The following directory will be removed if below output is empty: '${rmpat}' ##"
$sshcmd "${BUILDCACHE_USER}@${BUILDCACHE_SERVER}" \
"ls -la ${rmpat} || true"
if [ "$dry_run" != "y" ] ; then
set -x
$sshcmd "${BUILDCACHE_USER}@${BUILDCACHE_SERVER}" \
"rmdir ${rmpat} || true"
set +x
else
echo "## (DRY_RUN=y so not doing anything) ##"
fi
# Remove git tag (local and remote)
#
echo "## The following TAG will be deleted: '${version}' ##"
if [ "$dry_run" != "y" ] ; then
set -x
git tag -d "${version}"
git push --delete origin "${version}"
set +x
else
echo "## (DRY_RUN=y so not doing anything) ##"
fi
done
local mantle_ref
mantle_ref=$(cat sdk_container/.repo/manifests/mantle-container)
docker run --pull always --rm --net host \
--env AZURE_AUTH_CREDENTIALS --env AZURE_PROFILE \
--env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
--env DIGITALOCEAN_TOKEN_JSON \
--env EQUINIXMETAL_KEY --env EQUINIXMETAL_PROJECT \
--env GCP_JSON_KEY \
--env VMWARE_ESX_CREDS \
-w /work -v "$PWD":/work "${mantle_ref}" /work/ci-automation/garbage_collect_cloud.sh
}
# --

View File

@ -0,0 +1,19 @@
#!/bin/bash
set -euo pipefail
timeout --signal=SIGQUIT 60m ore aws gc --access-id "${AWS_ACCESS_KEY_ID}" --secret-key "${AWS_SECRET_ACCESS_KEY}"
timeout --signal=SIGQUIT 60m ore do gc --config-file=<(echo "${DIGITALOCEAN_TOKEN_JSON}" | base64 --decode)
timeout --signal=SIGQUIT 60m ore gcloud gc --json-key <(echo "${GCP_JSON_KEY}" | base64 --decode)
# Because the Azure files get read multiple times, they can't be passed as <(cmd): bash backs such
# an FD with a pipe, so the data is gone after the first read. Instead we create an FD manually
# (exec {NAME}<file assigns the FD number to the variable NAME) and back it with a file under /tmp,
# which allows multiple reads.
echo "${AZURE_PROFILE}" | base64 --decode > /tmp/azure_profile
exec {azure_profile}</tmp/azure_profile
rm /tmp/azure_profile
echo "${AZURE_AUTH_CREDENTIALS}" | base64 --decode > /tmp/azure_auth
exec {azure_auth}</tmp/azure_auth
rm /tmp/azure_auth
timeout --signal=SIGQUIT 60m ore azure gc --duration 6h \
--azure-profile="/proc/$$/fd/${azure_profile}" --azure-auth="/proc/$$/fd/${azure_auth}"
timeout --signal=SIGQUIT 60m ore equinixmetal gc --duration 6h \
--project="${EQUINIXMETAL_PROJECT}" --gs-json-key=<(echo "${GCP_JSON_KEY}" | base64 --decode) --api-key="${EQUINIXMETAL_KEY}"

View File

@ -0,0 +1,31 @@
# Common gpg setup code to be sourced by other scripts in this
# directory. It will set up the GnuPG home directory, possibly with a key
# from the SIGNING_KEY environment variable.
#
# After this file is sourced, SIGNER is always defined and exported,
# even if empty. SIGNING_KEY is clobbered.
: ${SIGNING_KEY:=''}
: ${SIGNER:=''}
if [[ "${HOME}/.gnupg" -ef "${PWD}/.gnupg" ]]; then
echo "Do not source ${BASH_SOURCE} directly in your home directory - it will clobber your GnuPG directory!" >&2
exit 1
fi
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
# Sometimes this directory is not created automatically, which makes
# subsequent private key imports fail. Create it here as a
# workaround.
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"
if [[ -n "${SIGNING_KEY}" ]] && [[ -n "${SIGNER}" ]]; then
gpg --import "${SIGNING_KEY}"
else
SIGNER=''
fi
export SIGNER
# Clobber signing key variable, we don't need it any more.
export SIGNING_KEY=''
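# Hypothetical usage from a sibling ci-automation script (values are examples only):
#   SIGNER='buildbot' SIGNING_KEY='/path/to/private-key.asc' source ci-automation/gpg_setup.sh
#   # afterwards ${SIGNER} is either the signer name (key imported) or empty (signing disabled)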

113
ci-automation/image.sh Normal file
View File

@ -0,0 +1,113 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# image_build() should be called w/ the positional INPUT parameters below.
# Binary OS image build automation stub.
# This script will build the OS image from a pre-built packages container.
#
# PREREQUISITES:
#
# 1. SDK version and OS image version are recorded in sdk_container/.repo/manifests/version.txt
# 2. Scripts repo version tag of OS image version to be built is available and checked out.
# 3. Flatcar packages container is available via build cache server
# from "/containers/[VERSION]/flatcar-packages-[ARCH]-[FLATCAR_VERSION].tar.gz"
# or present locally. Container must contain binary packages and torcx artifacts.
#
# INPUT:
#
# 1. Architecture (ARCH) of the TARGET OS image ("arm64", "amd64").
#
# OPTIONAL INPUT:
#
# 1. SIGNER. Environment variable. Name of the owner of the artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNING_KEY environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 2. SIGNING_KEY. Environment variable. The artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored.
#
# OUTPUT:
#
# 1. OS image, dev container, related artifacts, and torcx packages pushed to buildcache.
# 2. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
# 3. If signer key was passed, signatures of artifacts from point 1, pushed along to buildcache.
# 4. DIGESTS of the artifacts from point 1, pushed to buildcache. If signer key was passed, armored ASCII files of the generated DIGESTS files too, pushed to buildcache.
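#
# Example invocation (hypothetical values; omit SIGNER/SIGNING_KEY to skip signing):
#   SIGNER='buildbot' SIGNING_KEY='/path/to/key.asc' image_build amd64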
function image_build() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_image_build_impl "${@}"
)
}
# --
function _image_build_impl() {
local arch="$1"
source sdk_lib/sdk_container_common.sh
local channel=""
channel="$(get_git_channel)"
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
local vernum="${FLATCAR_VERSION}"
local docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
local packages="flatcar-packages-${arch}"
local packages_image="${packages}:${docker_vernum}"
docker_image_from_buildcache "${packages}" "${docker_vernum}"
local image="flatcar-images-${arch}"
local image_container="${image}-${docker_vernum}"
local official_arg=""
if is_official "${vernum}"; then
export COREOS_OFFICIAL=1
official_arg="--official"
else
export COREOS_OFFICIAL=0
official_arg="--noofficial"
fi
# build image and related artifacts
./run_sdk_container -x ./ci-cleanup.sh -n "${image_container}" -C "${packages_image}" \
-v "${vernum}" \
mkdir -p "${CONTAINER_IMAGE_ROOT}"
./run_sdk_container -n "${image_container}" -C "${packages_image}" \
-v "${vernum}" \
./set_official --board="${arch}-usr" "${official_arg}"
./run_sdk_container -n "${image_container}" -C "${packages_image}" \
-v "${vernum}" \
./build_image --board="${arch}-usr" --group="${channel}" \
--output_root="${CONTAINER_IMAGE_ROOT}" \
--only_store_compressed \
--torcx_root="${CONTAINER_TORCX_ROOT}" prodtar container
# copy resulting images + push to buildcache
local images_out="images/"
rm -rf "${images_out}"
./run_sdk_container -n "${image_container}" -C "${packages_image}" \
-v "${vernum}" \
mv "${CONTAINER_IMAGE_ROOT}/${arch}-usr/" "./${images_out}/"
# Delete uncompressed generic image before signing and upload
rm "images/latest/flatcar_production_image.bin" "images/latest/flatcar_production_update.bin"
create_digests "${SIGNER}" "images/latest/"*
sign_artifacts "${SIGNER}" "images/latest/"*
copy_to_buildcache "images/${arch}/${vernum}/" "images/latest/"*
}
# --

View File

@ -0,0 +1,122 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# image_changes() should be called w/ the positional INPUT parameters below.
# OS image differences display stub.
# This script will display the differences between the last released image and the currently built one.
#
# PREREQUISITES:
#
# 1. Artifacts describing the built image (kernel config, contents, packages, etc.) must be present in build cache server.
# 2. Scripts repo version tag of OS image version to be built is available and checked out.
#
# INPUT:
#
# 1. Architecture (ARCH) of the TARGET OS image ("arm64", "amd64").
#
# OPTIONAL INPUT:
#
# (none)
#
# OUTPUT:
#
# 1. Currently the script prints the image differences compared to the last release and the changelog for the release notes, but does not yet store them in the buildcache.
function image_changes() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_image_changes_impl "${@}"
)
}
# --
function _image_changes_impl() {
local arch="$1"
source sdk_lib/sdk_container_common.sh
local channel=""
channel="$(get_git_channel)"
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
local vernum="${FLATCAR_VERSION}"
echo "==================================================================="
export BOARD_A="${arch}-usr"
export FROM_A="release"
if [ "${channel}" = "developer" ]; then
NEW_CHANNEL="alpha"
else
NEW_CHANNEL="${channel}"
fi
NEW_CHANNEL_VERSION_A=$(curl -fsSL --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 "https://${NEW_CHANNEL}.release.flatcar-linux.net/${BOARD_A}/current/version.txt" | grep -m 1 FLATCAR_VERSION= | cut -d = -f 2)
MAJOR_A=$(echo "${NEW_CHANNEL_VERSION_A}" | cut -d . -f 1)
MAJOR_B=$(echo "${FLATCAR_VERSION}" | cut -d . -f 1)
# When the major version for the new channel is different, a transition has happened and we can find the previous release in the old channel
if [ "${MAJOR_A}" != "${MAJOR_B}" ]; then
case "${NEW_CHANNEL}" in
lts)
CHANNEL_A=stable
;;
stable)
CHANNEL_A=beta
;;
*)
CHANNEL_A=alpha
;;
esac
VERSION_A=$(curl -fsSL --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 "https://${CHANNEL_A}.release.flatcar-linux.net/${BOARD_A}/current/version.txt" | grep -m 1 FLATCAR_VERSION= | cut -d = -f 2)
else
CHANNEL_A="${NEW_CHANNEL}"
VERSION_A="${NEW_CHANNEL_VERSION_A}"
fi
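# Example (hypothetical numbers): when building beta-3227.1.0 while beta still serves major 3139,
# a transition happened and the previous release is looked up in the alpha channel instead.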
export VERSION_A
export CHANNEL_A
export FROM_B="bincache"
export VERSION_B="${vernum}"
export BOARD_B="${arch}-usr"
# CHANNEL_B is unused
echo "== Image differences compared to ${CHANNEL_A} ${VERSION_A} =="
NEW_VERSION=$(git tag --points-at HEAD)
cd ..
rm -rf flatcar-build-scripts
git clone "https://github.com/flatcar/flatcar-build-scripts"
# Don't fail the job
set +e
echo "Package updates, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_packages.txt flatcar-build-scripts/package-diff "${VERSION_A}" "${VERSION_B}"
echo
echo "Image file changes, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_contents.txt FILESONLY=1 CUTKERNEL=1 flatcar-build-scripts/package-diff "${VERSION_A}" "${VERSION_B}"
echo
echo "Image kernel config changes, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_kernel_config.txt flatcar-build-scripts/package-diff "${VERSION_A}" "${VERSION_B}"
echo
echo "Image file size change (includes /boot, /usr and the default rootfs partitions), compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_contents.txt CALCSIZE=1 flatcar-build-scripts/package-diff "${VERSION_A}" "${VERSION_B}"
echo
BASE_URL="http://${BUILDCACHE_SERVER}/images/${arch}/${vernum}"
echo "Image URL: ${BASE_URL}/flatcar_production_image.bin.bz2"
echo
# Provide a python3 command for the CVE DB parsing
export PATH="$PATH:$PWD/scripts/ci-automation/python-bin"
# The first changelog we print is always against the previous version of the new channel (this is the same as CHANNEL_A VERSION_A only when no channel transition happened)
flatcar-build-scripts/show-changes "${NEW_CHANNEL}-${NEW_CHANNEL_VERSION_A}" "${NEW_VERSION}"
# See if a channel transition happened and print the changelog against CHANNEL_A VERSION_A which is the previous release
if [ "${CHANNEL_A}" != "${NEW_CHANNEL}" ]; then
flatcar-build-scripts/show-changes "${CHANNEL_A}-${VERSION_A}" "${NEW_VERSION}"
fi
set -e
}
# --

View File

@ -0,0 +1,120 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# packages_tag() should be called w/ the positional INPUT parameters below.
# OS packages build tag automation stub.
# This script will update the versionfile with the OS packages version to build,
# and will add a version tag (see INPUT) to the scripts repo.
#
# PREREQUISITES:
#
# 1. SDK version is recorded in sdk_container/.repo/manifests/version.txt
# 2. SDK container is either
# - available via ghcr.io/flatcar/flatcar-sdk-[ARCH]:[VERSION] (official SDK release)
# OR
# - available via build cache server "/containers/[VERSION]/flatcar-sdk-[ARCH]-[VERSION].tar.gz"
# (dev SDK)
#
# INPUT:
#
# 1. Version of the TARGET OS image to build (string).
# The version pattern '(alpha|beta|stable|lts)-MMMM.m.p' (e.g. 'alpha-3051.0.0')
# denotes an "official" build, i.e. a release build to be published.
# Use any version diverging from the pattern (e.g. 'alpha-3051.0.0-nightly-4302') for development / CI builds.
# A tag of this version will be created in the scripts repo and pushed upstream.
#
#
# OPTIONAL INPUT:
#
# 2. coreos-overlay repository tag to use (commit-ish).
# Optional - use scripts repo sub-modules as-is if not set.
# This version will be checked out / pulled from remote in the coreos-overlay git submodule.
# The submodule config will be updated to point to this version before the TARGET SDK tag is created and pushed.
#
# 3. portage-stable repository tag to use (commit-ish).
# Optional - use scripts repo sub-modules as-is if not set.
# This version will be checked out / pulled from remote in the portage-stable git submodule.
# The submodule config will be updated to point to this version before the TARGET SDK tag is created and pushed.
#
# OUTPUT:
#
# 1. Updated scripts repository
# - version tag w/ submodules
# - sdk_container/.repo/manifests/version.txt denotes new FLATCAR OS version
# 2. "./skip-build" as flag file to signal that the build should stop
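#
# Example invocation (hypothetical tag; optional submodule refs omitted):
#   packages_tag alpha-3051.0.0-nightly-4302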
function packages_tag() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_packages_tag_impl "${@}"
)
}
# --
function _packages_tag_impl() {
local version="$1"
local coreos_git="${2:-}"
local portage_git="${3:-}"
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
check_version_string "${version}"
source sdk_container/.repo/manifests/version.txt
local sdk_version="${FLATCAR_SDK_VERSION}"
if [ -n "${coreos_git}" ] ; then
update_submodule "coreos-overlay" "${coreos_git}"
fi
if [ -n "${portage_git}" ] ; then
update_submodule "portage-stable" "${portage_git}"
fi
# Create new tag in scripts repo w/ updated versionfile + submodules.
# Also push the changes to the branch ONLY IF we're doing a nightly
# build of the 'main'/'flatcar-MAJOR' branch AND we're definitely ON the respective branch
# (`scripts` and submodules).
local push_branch="false"
if [[ "${version}" =~ ^(stable|alpha|beta|lts)-[0-9.]+-nightly-[-0-9]+$ ]] \
&& [[ "$(git rev-parse --abbrev-ref HEAD)" =~ ^flatcar-[0-9]+$ ]] \
&& [[ "$(git -C sdk_container/src/third_party/coreos-overlay/ rev-parse --abbrev-ref HEAD)" =~ ^flatcar-[0-9]+$ ]] \
&& [[ "$(git -C sdk_container/src/third_party/portage-stable/ rev-parse --abbrev-ref HEAD)" =~ ^flatcar-[0-9]+$ ]] ; then
push_branch="true"
local existing_tag=""
existing_tag=$(git tag --points-at HEAD) # exit code is always 0, output may be empty
# If the found tag is a release or nightly tag, we stop this build if there are no changes
if [[ "${existing_tag}" =~ ^(stable|alpha|beta|lts)-[0-9.]+(|-nightly-[-0-9]+)$ ]]; then
local ret=0
git diff --exit-code "${existing_tag}" || ret=$?
if [[ ${ret} -eq 0 ]]; then
touch ./skip-build
echo "Creating ./skip-build flag file to indicate that the build must not continue: no new tag was created because there are no changes since tag ${existing_tag}" >&2
return 0
elif [[ ${ret} -eq 1 ]]; then
echo "Found changes since last tag ${existing_tag}" >&2
else
echo "Error: Unexpected git diff return code (${ret})" >&2
return 1
fi
fi
fi
# Create version file
(
source sdk_lib/sdk_container_common.sh
create_versionfile "$sdk_version" "$version"
)
update_and_push_version "${version}" "${push_branch}"
}
# --

136
ci-automation/packages.sh Normal file
View File

@ -0,0 +1,136 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# packages_build() should be called w/ the positional INPUT parameters below.
# OS image binary packages build automation stub.
# This script will use an SDK container to build packages for an OS image.
#
# PREREQUISITES:
#
# 1. SDK version and OS image version are recorded in sdk_container/.repo/manifests/version.txt
# 2. Scripts repo version tag of OS image version to be built is available and checked out.
# 3. SDK container is either
# - available via ghcr.io/flatcar/flatcar-sdk-[ARCH]:[VERSION] (official SDK release)
# OR
# - available via build cache server "/containers/[VERSION]/flatcar-sdk-[ARCH]-[VERSION].tar.gz"
# (dev SDK)
#
# INPUT:
#
# 1. Architecture (ARCH) of the TARGET OS image ("arm64", "amd64").
#
#
# OPTIONAL INPUT:
#
# 2. SIGNER. Environment variable. Name of the owner of the artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNING_KEY environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 3. SIGNING_KEY. Environment variable. The artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored.
#
# OUTPUT:
#
# 1. Exported container image "flatcar-packages-[ARCH]-[VERSION].tar.gz" with binary packages
# pushed to buildcache, and torcx_manifest.json pushed to "images/${arch}/${vernum}/"
# (for use with tests).
# 2. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
# 3. If signer key was passed, signatures of artifacts from point 1, pushed along to buildcache.
# 4. DIGESTS of the artifacts from point 1, pushed to buildcache. If signer key was passed, armored ASCII files of the generated DIGESTS files too, pushed to buildcache.
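#
# Example invocation (hypothetical, unsigned dev build):
#   packages_build arm64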
function packages_build() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_packages_build_impl "${@}"
)
}
# --
function _packages_build_impl() {
local arch="$1"
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
local sdk_version="${FLATCAR_SDK_VERSION}"
# Get SDK from either the registry or import from build cache
# This is a NOP if the image is present locally.
local sdk_name="flatcar-sdk-${arch}"
local docker_sdk_vernum="$(vernum_to_docker_image_version "${sdk_version}")"
docker_image_from_registry_or_buildcache "${sdk_name}" "${docker_sdk_vernum}"
local sdk_image="$(docker_image_fullname "${sdk_name}" "${docker_sdk_vernum}")"
echo "docker image rm -f '${sdk_image}'" >> ./ci-cleanup.sh
# Set name of the packages container for later rename / export
local vernum="${FLATCAR_VERSION}"
local docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
local packages_container="flatcar-packages-${arch}-${docker_vernum}"
local torcx_pkg_url="https://${BUILDCACHE_SERVER}/images/${arch}/${vernum}/torcx"
source sdk_lib/sdk_container_common.sh
if is_official "${vernum}"; then
# A channel returned by get_git_channel should not ever be
# "developer" here, because it's an official build done from
# one of the maintenance branches. So if the channel happens
# to be "developer", then you are doing it wrong (releasing
# from the main branch?).
torcx_pkg_url="https://$(get_git_channel).release.flatcar-linux.net/${arch}-usr/${vernum}/torcx"
fi
# Build packages; store packages and torcx output in container
./run_sdk_container -x ./ci-cleanup.sh -n "${packages_container}" -v "${vernum}" \
-C "${sdk_image}" \
mkdir -p "${CONTAINER_TORCX_ROOT}"
./run_sdk_container -n "${packages_container}" -v "${vernum}" \
-C "${sdk_image}" \
./build_packages --board="${arch}-usr" \
--torcx_output_root="${CONTAINER_TORCX_ROOT}" \
--torcx_extra_pkg_url="${torcx_pkg_url}"
# copy torcx manifest and docker tarball for publishing
local torcx_tmp="__build__/torcx_tmp"
rm -rf "${torcx_tmp}"
mkdir "${torcx_tmp}"
./run_sdk_container -n "${packages_container}" -v "${vernum}" \
-C "${sdk_image}" \
cp -r "${CONTAINER_TORCX_ROOT}/" \
"${torcx_tmp}"
# run_sdk_container updates the version file, use that version from here on
source sdk_container/.repo/manifests/version.txt
local vernum="${FLATCAR_VERSION}"
local docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
local packages_image="flatcar-packages-${arch}"
# generate image + push to build cache
docker_commit_to_buildcache "${packages_container}" "${packages_image}" "${docker_vernum}"
# Publish torcx manifest and docker tarball to "images" cache so tests can pull it later.
create_digests "${SIGNER}" \
"${torcx_tmp}/torcx/${arch}-usr/latest/torcx_manifest.json" \
"${torcx_tmp}/torcx/pkgs/${arch}-usr/docker/"*/*.torcx.tgz
sign_artifacts "${SIGNER}" \
"${torcx_tmp}/torcx/${arch}-usr/latest/torcx_manifest.json"* \
"${torcx_tmp}/torcx/pkgs/${arch}-usr/docker/"*/*.torcx.tgz*
copy_to_buildcache "images/${arch}/${vernum}/torcx" \
"${torcx_tmp}/torcx/${arch}-usr/latest/torcx_manifest.json"*
copy_to_buildcache "images/${arch}/${vernum}/torcx" \
"${torcx_tmp}/torcx/pkgs/${arch}-usr/docker/"*/*.torcx.tgz*
}
# --

View File

@ -0,0 +1,99 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# push_packages() should be called w/ the positional INPUT parameters below.
# OS image binary packages publisher automation stub.
# This script will publish the packages from a pre-built packages container to
# the buildcache server, effectively turning the build cache into a
# binary packages server for the SDK.
#
# PREREQUISITES:
#
# 1. SDK version and OS image version are recorded in sdk_container/.repo/manifests/version.txt
# 2. Scripts repo version tag of OS image version to be built is available and checked out.
# 3. Flatcar packages container is available via build cache server
# from "/containers/[VERSION]/flatcar-packages-[ARCH]-[FLATCAR_VERSION].tar.gz"
# or present locally. Container must contain binary packages and torcx artifacts.
#
# INPUT:
#
# 1. Architecture (ARCH) of the TARGET OS image ("arm64", "amd64").
#
# OPTIONAL INPUT:
#
# 1. SIGNER. Environment variable. Name of the owner of the artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNING_KEY environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 2. SIGNING_KEY. Environment variable. The artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored.
#
# OUTPUT:
#
# 1. Binary packages published to buildcache at "boards/[ARCH]-usr/[VERSION]/pkgs".
# 2. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
# 3. If signer key was passed, signatures of artifacts from point 1, pushed along to buildcache.
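#
# Example invocation (hypothetical):
#   push_packages amd64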
# This function is run _inside_ the SDK container
function image_build__copy_to_bincache() {
local arch="$1"
local version="$2"
source ci-automation/ci_automation_common.sh
# change the owner of the files and directories in __build__ back
# to ourselves, otherwise we could fail to sign the artifacts because
# we would lack write permissions in the directory of the signed
# artifact
local uid=$(id --user)
local gid=$(id --group)
cd /build/$arch-usr/var/lib/portage/pkgs/
sudo chown --recursive "${uid}:${gid}" .
sign_artifacts "${SIGNER}" *
copy_to_buildcache "boards/$arch-usr/$version/pkgs" *
}
# --
function push_packages() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_push_packages_impl "${@}"
)
}
# --
function _push_packages_impl() {
local arch="$1"
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
local vernum="${FLATCAR_VERSION}"
local docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
local packages="flatcar-packages-${arch}"
local packages_image="${packages}:${docker_vernum}"
docker_image_from_buildcache "${packages}" "${docker_vernum}"
local cmd="source ci-automation/push_pkgs.sh"
cmd="$cmd; image_build__copy_to_bincache '$arch' '$vernum'"
local my_name="flatcar-packages-publisher-${arch}-${docker_vernum}"
./run_sdk_container -x ./ci-cleanup.sh -n "${my_name}" -C "${packages_image}" \
bash -c "$cmd"
}
# --

View File

@ -0,0 +1,6 @@
#!/bin/sh
# Expects to be invoked as interpreter through a shebang
FOLDER="$(dirname "$(readlink -f "$1")")"
docker pull docker.io/python:alpine 2>/dev/null >/dev/null
# Mount the current and the script folder into the container, and install the pip package needed for flatcar-build-scripts/show-fixed-kernel-cves.py
exec docker run --rm -i -v "${FOLDER}:${FOLDER}" -v "${PWD}:${PWD}" -w "${PWD}" docker.io/python:alpine sh -c "pip install packaging 2>/dev/null >/dev/null; python3 $*"
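# Hypothetical usage: with ci-automation/python-bin prepended to PATH (as done in image_changes.sh),
# a helper script's 'python3' shebang resolves to this wrapper and runs inside the python:alpine
# container with the 'packaging' module available.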

405
ci-automation/release.sh Normal file
View File

@ -0,0 +1,405 @@
#!/bin/bash
# Copyright (c) 2022 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# release_build() is currently called with no positional INPUT parameters but uses the signing env vars.
# Release build automation stub.
# This script will release the image build from bincache to the cloud offers.
#
# PREREQUISITES:
#
# 1. SDK version and OS image version are recorded in sdk_container/.repo/manifests/version.txt
# 2. Scripts repo version tag of OS image version to be built is available and checked out.
# 3. Mantle container docker image reference is stored in sdk_container/.repo/manifests/mantle-container.
# 4. Vendor image and torcx docker tarball + manifest to run tests for are available on buildcache
# ( images/[ARCH]/[FLATCAR_VERSION]/ )
# 5. SDK container is either
# - available via ghcr.io/flatcar/flatcar-sdk-[ARCH]:[VERSION] (official SDK release)
# OR
# - available via build cache server "/containers/[VERSION]/flatcar-sdk-[ARCH]-[VERSION].tar.gz"
# (dev SDK)
#
# INPUT:
#
# (none)
#
# OPTIONAL INPUT:
#
# 1. SIGNER. Environment variable. Name of the owner of the artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNING_KEY environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 2. SIGNING_KEY. Environment variable. The artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 3. REGISTRY_USERNAME. Environment variable. The username to use for Docker registry login.
# Defaults to nothing if not set - in such case, SDK container will not be pushed.
#
# 4. REGISTRY_PASSWORD. Environment variable. The password to use for Docker registry login.
# Defaults to nothing if not set - in such case, SDK container will not be pushed.
#
# 5. Cloud credentials as secrets via the environment variables AZURE_PROFILE, AZURE_AUTH_CREDENTIALS,
# AWS_CREDENTIALS, AWS_MARKETPLACE_CREDENTIALS, AWS_MARKETPLACE_ARN, AWS_CLOUDFORMATION_CREDENTIALS,
# GCP_JSON_KEY, GOOGLE_RELEASE_CREDENTIALS.
#
# OUTPUT:
#
# 1. The cloud images are published with mantle's plume and ore tools
# 2. The AWS AMI text files are pushed to buildcache ( images/[ARCH]/[FLATCAR_VERSION]/ )
# 3. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
# 4. If signer key was passed, signatures of artifacts from point 1, pushed along to buildcache.
# 5. DIGESTS of the artifacts from point 1, pushed to buildcache. If signer key was passed, armored ASCII files of the generated DIGESTS files too, pushed to buildcache.
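#
# Example invocation (no positional parameters; credentials are read from the environment):
#   release_build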
function release_build() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_release_build_impl "${@}"
)
}
function _inside_mantle() {
# Run a subshell for the same reasons as above
(
set -euo pipefail
source sdk_lib/sdk_container_common.sh
source ci-automation/ci_automation_common.sh
source sdk_container/.repo/manifests/version.txt
# Needed because we are not running inside the SDK container here
source sdk_container/.env
CHANNEL="$(get_git_channel)"
VERSION="${FLATCAR_VERSION}"
azure_profile_config_file=""
secret_to_file azure_profile_config_file "${AZURE_PROFILE}"
azure_auth_config_file=""
secret_to_file azure_auth_config_file "${AZURE_AUTH_CREDENTIALS}"
aws_credentials_config_file=""
secret_to_file aws_credentials_config_file "${AWS_CREDENTIALS}"
aws_marketplace_credentials_file=""
secret_to_file aws_marketplace_credentials_file "${AWS_MARKETPLACE_CREDENTIALS}"
gcp_json_key_path=""
secret_to_file gcp_json_key_path "${GCP_JSON_KEY}"
google_release_credentials_file=""
secret_to_file google_release_credentials_file "${GOOGLE_RELEASE_CREDENTIALS}"
for platform in aws azure; do
for arch in amd64 arm64; do
# Create a folder where plume stores flatcar_production_ami_*txt and flatcar_production_ami_*json
# for later push to bincache
rm -rf "${platform}-${arch}"
mkdir "${platform}-${arch}"
pushd "${platform}-${arch}"
# For pre-release we don't use the Google Cloud token because it's not needed
# and we don't want to upload the AMIs to GCS anymore
# (change https://github.com/flatcar/mantle/blob/bc6bc232677c45e389feb221da295cc674882f8c/cmd/plume/prerelease.go#L663-L667
# if you want to add GCP release code in plume pre-release instead of plume release)
plume pre-release --force \
--debug \
--platform="${platform}" \
--aws-credentials="${aws_credentials_config_file}" \
--azure-profile="${azure_profile_config_file}" \
--azure-auth="${azure_auth_config_file}" \
--gce-json-key=none \
--board="${arch}-usr" \
--channel="${CHANNEL}" \
--version="${FLATCAR_VERSION}" \
--write-image-list="images.json"
popd
done
done
for arch in amd64 arm64; do
# Create a folder where plume stores any temporarily downloaded files
rm -rf "release-${arch}"
mkdir "release-${arch}"
pushd "release-${arch}"
export product="${CHANNEL}-${arch}"
pid=$(jq -r ".[env.product]" ../product-ids.json)
# If the channel is 'stable' and the arch 'amd64', we add the stable-pro-amd64 product ID to the product IDs.
# The published AMI ID is the same for both offers.
[[ "${CHANNEL}" == "stable" ]] && [[ "${arch}" == "amd64" ]] && pid="${pid},$(jq -r '.["stable-pro-amd64"]' ../product-ids.json)"
plume release \
--debug \
--aws-credentials="${aws_credentials_config_file}" \
--aws-marketplace-credentials="${aws_marketplace_credentials_file}" \
--publish-marketplace \
--access-role-arn="${AWS_MARKETPLACE_ARN}" \
--product-ids="${pid}" \
--azure-profile="${azure_profile_config_file}" \
--azure-auth="${azure_auth_config_file}" \
--gce-json-key="${gcp_json_key_path}" \
--gce-release-key="${google_release_credentials_file}" \
--board="${arch}-usr" \
--channel="${CHANNEL}" \
--version="${VERSION}"
popd
done
# Future: move this to "plume release", in the past this was done in "update-cloudformation-template"
aws_cloudformation_credentials_file=""
secret_to_file aws_cloudformation_credentials_file "${AWS_CLOUDFORMATION_CREDENTIALS}"
export AWS_SHARED_CREDENTIALS_FILE="${aws_cloudformation_credentials_file}"
rm -rf cloudformation-files
mkdir cloudformation-files
for arch in amd64 arm64; do
generate_templates "aws-${arch}/flatcar_production_ami_all.json" "${CHANNEL}" "${arch}-usr"
done
aws s3 cp --recursive --acl public-read cloudformation-files/ "s3://flatcar-prod-ami-import-eu-central-1/dist/aws/"
)
}
function publish_sdk() {
local docker_sdk_vernum="$1"
local sdk_name=""
# If the registry password or the registry username is not set, we leave early.
if [[ -z "${REGISTRY_PASSWORD}" ]] || [[ -z "${REGISTRY_USERNAME}" ]]; then return; fi
(
# Don't print the password to stderr when logging in
set +x
local container_registry=""
container_registry=$(echo "${sdk_container_common_registry}" | cut -d / -f 1)
echo "${REGISTRY_PASSWORD}" | docker login "${container_registry}" -u "${REGISTRY_USERNAME}" --password-stdin
)
# Docker images are pushed in the container registry.
for a in all amd64 arm64; do
sdk_name="flatcar-sdk-${a}"
docker_image_from_registry_or_buildcache "${sdk_name}" "${docker_sdk_vernum}"
docker push "${sdk_container_common_registry}/flatcar-sdk-${a}:${docker_sdk_vernum}"
done
}
function _release_build_impl() {
source sdk_lib/sdk_container_common.sh
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
# Needed because we are not running inside the SDK container here
source sdk_container/.env
local sdk_version="${FLATCAR_SDK_VERSION}"
local docker_sdk_vernum=""
docker_sdk_vernum="$(vernum_to_docker_image_version "${sdk_version}")"
local vernum="${FLATCAR_VERSION}"
local docker_vernum=""
docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
local container_name="flatcar-publish-${docker_vernum}"
local mantle_ref
mantle_ref=$(cat sdk_container/.repo/manifests/mantle-container)
# A job on each worker prunes old mantle images (docker image prune), no need to do it here
echo "docker rm -f '${container_name}'" >> ./ci-cleanup.sh
touch sdk_container/.env # This file should already contain the required credentials as env vars
docker run --pull always --rm --name="${container_name}" --net host \
-w /work -v "$PWD":/work "${mantle_ref}" bash -c "source ci-automation/release.sh; _inside_mantle"
# Push flatcar_production_ami_*txt and flatcar_production_ami_*json to the right bincache folder
for arch in amd64 arm64; do
sudo chown -R "$USER:$USER" "aws-${arch}"
create_digests "${SIGNER}" "aws-${arch}/flatcar_production_ami_"*txt "aws-${arch}/flatcar_production_ami_"*json
sign_artifacts "${SIGNER}" "aws-${arch}/flatcar_production_ami_"*txt "aws-${arch}/flatcar_production_ami_"*json
copy_to_buildcache "images/${arch}/${vernum}/" "aws-${arch}/flatcar_production_ami_"*txt* "aws-${arch}/flatcar_production_ami_"*json*
done
publish_sdk "${docker_sdk_vernum}"
echo "===="
echo "Done, now you can copy the images to Origin"
echo "===="
# Future: trigger copy to Origin in a secure way
# Future: trigger update payload signing
# Future: trigger website update
# Future: trigger release email sending
# Future: trigger push to nebraska
# Future: trigger Origin symlink switch
}
TEMPLATE='
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Flatcar Linux on EC2: https://kinvolk.io/docs/flatcar-container-linux/latest/installing/cloud/aws-ec2/",
"Mappings" : {
"RegionMap" : {
###AMIS###
}
},
"Parameters": {
"InstanceType" : {
"Description" : "EC2 HVM instance type (m3.medium, etc).",
"Type" : "String",
"Default" : "m3.medium",
"ConstraintDescription" : "Must be a valid EC2 HVM instance type."
},
"ClusterSize": {
"Default": "3",
"MinValue": "3",
"MaxValue": "12",
"Description": "Number of nodes in cluster (3-12).",
"Type": "Number"
},
"DiscoveryURL": {
"Description": "A unique etcd cluster discovery URL. Grab a new token from https://discovery.etcd.io/new?size=<your cluster size>",
"Type": "String"
},
"AdvertisedIPAddress": {
"Description": "Use 'private' if your etcd cluster is within one region or 'public' if it spans regions or cloud providers.",
"Default": "private",
"AllowedValues": ["private", "public"],
"Type": "String"
},
"AllowSSHFrom": {
"Description": "The net block (CIDR) that SSH is available to.",
"Default": "0.0.0.0/0",
"Type": "String"
},
"KeyPair" : {
"Description" : "The name of an EC2 Key Pair to allow SSH access to the instance.",
"Type" : "String"
}
},
"Resources": {
"FlatcarSecurityGroup": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupDescription": "Flatcar Linux SecurityGroup",
"SecurityGroupIngress": [
{"IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": {"Ref": "AllowSSHFrom"}}
]
}
},
"Ingress4001": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupName": {"Ref": "FlatcarSecurityGroup"}, "IpProtocol": "tcp", "FromPort": "4001", "ToPort": "4001", "SourceSecurityGroupId": {
"Fn::GetAtt" : [ "FlatcarSecurityGroup", "GroupId" ]
}
}
},
"Ingress2379": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupName": {"Ref": "FlatcarSecurityGroup"}, "IpProtocol": "tcp", "FromPort": "2379", "ToPort": "2379", "SourceSecurityGroupId": {
"Fn::GetAtt" : [ "FlatcarSecurityGroup", "GroupId" ]
}
}
},
"Ingress2380": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupName": {"Ref": "FlatcarSecurityGroup"}, "IpProtocol": "tcp", "FromPort": "2380", "ToPort": "2380", "SourceSecurityGroupId": {
"Fn::GetAtt" : [ "FlatcarSecurityGroup", "GroupId" ]
}
}
},
"FlatcarServerAutoScale": {
"Type": "AWS::AutoScaling::AutoScalingGroup",
"Properties": {
"AvailabilityZones": {"Fn::GetAZs": ""},
"LaunchConfigurationName": {"Ref": "FlatcarServerLaunchConfig"},
"MinSize": "3",
"MaxSize": "12",
"DesiredCapacity": {"Ref": "ClusterSize"},
"Tags": [
{"Key": "Name", "Value": { "Ref" : "AWS::StackName" }, "PropagateAtLaunch": true}
]
}
},
"FlatcarServerLaunchConfig": {
"Type": "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "AMI" ]},
"InstanceType": {"Ref": "InstanceType"},
"KeyName": {"Ref": "KeyPair"},
"SecurityGroups": [{"Ref": "FlatcarSecurityGroup"}],
"UserData" : { "Fn::Base64":
{ "Fn::Join": [ "", [
"#cloud-config\n\n",
"coreos:\n",
" etcd2:\n",
" discovery: ", { "Ref": "DiscoveryURL" }, "\n",
" advertise-client-urls: http://$", { "Ref": "AdvertisedIPAddress" }, "_ipv4:2379\n",
" initial-advertise-peer-urls: http://$", { "Ref": "AdvertisedIPAddress" }, "_ipv4:2380\n",
" listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001\n",
" listen-peer-urls: http://$", { "Ref": "AdvertisedIPAddress" }, "_ipv4:2380\n",
" units:\n",
" - name: etcd2.service\n",
" command: start\n",
" - name: fleet.service\n",
" command: start\n"
] ]
}
}
}
}
}
}
'
function generate_templates() {
local IFILE="$1"
local CHANNEL="$2"
local BOARD="$3"
local TMPFILE=""
local ARCHTAG=""
local REGIONS=("eu-central-1"
"ap-northeast-1"
"ap-northeast-2"
# "ap-northeast-3" # Disabled for now because we do not have access
"af-south-1"
"ca-central-1"
"ap-south-1"
"sa-east-1"
"ap-southeast-1"
"ap-southeast-2"
"ap-southeast-3"
"us-east-1"
"us-east-2"
"us-west-2"
"us-west-1"
"eu-west-1"
"eu-west-2"
"eu-west-3"
"eu-north-1"
"eu-south-1"
"ap-east-1"
"me-south-1")
if [ "${BOARD}" = "amd64-usr" ]; then
ARCHTAG=""
elif [ "${BOARD}" = "arm64-usr" ]; then
ARCHTAG="-arm64"
else
echo "No architecture tag defined for board \"${BOARD}\""
exit 1
fi
TMPFILE=$(mktemp)
>${TMPFILE}
for region in "${REGIONS[@]}"; do
echo " \"${region}\" : {" >> ${TMPFILE}
echo -n ' "AMI" : ' >> ${TMPFILE}
cat "${IFILE}" | jq ".[] | map(select(.name == \"${region}\")) | .[0] | .\"hvm\"" >> ${TMPFILE}
echo " }," >> ${TMPFILE}
done
truncate -s-2 ${TMPFILE}
echo "${TEMPLATE}" | perl -i -0pe "s/###AMIS###/$(cat -- ${TMPFILE})/g" > "cloudformation-files/flatcar-${CHANNEL}${ARCHTAG}-hvm.template"
rm "${TMPFILE}"
}
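# For illustration, each generated RegionMap entry looks like this (hypothetical AMI ID):
#   "eu-central-1" : {
#   "AMI" : "ami-0123456789abcdef0"
#   },
# with the trailing ",\n" of the last entry stripped by 'truncate -s-2' above.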

View File

@ -0,0 +1,152 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# sdk_bootstrap() should be called w/ the positional INPUT parameters below.
# Bootstrap SDK build automation stub.
# This script will use a seed SDK container + tarball to bootstrap a
# new SDK tarball.
#
# INPUT:
#
# 1. Version of the SEED SDK to use (string).
# The seed SDK tarball must be available on https://mirror.release.flatcar-linux.net/sdk/ ...
# The seed SDK container must be available from https://github.com/orgs/flatcar/packages
# (via ghcr.io/flatcar/flatcar-sdk-all:[VERSION]).
#
# 2. Version of the TARGET SDK to build (string).
# The version pattern 'MMMM.m.p' (e.g. '3051.0.0') denotes an "official" build, i.e. a release build to be published.
# Use any version diverging from the pattern (e.g. '3051.0.0-nightly-4302') for development / CI builds.
# A free-standing tagged commit will be created in the scripts repo and pushed upstream.
#
# OPTIONAL INPUT:
#
# 3. coreos-overlay repository tag to use (commit-ish).
# This version will be checked out / pulled from remote in the coreos-overlay git submodule.
# The submodule config will be updated to point to this version before the TARGET SDK tag is created and pushed.
# Leave empty to use coreos-overlay as-is.
#
# 4. portage-stable repository tag to use (commit-ish).
# This version will be checked out / pulled from remote in the portage-stable git submodule.
# The submodule config will be updated to point to this version before the TARGET SDK tag is created and pushed.
# Leave empty to use portage-stable as-is.
#
# 5. ARCH. Environment variable. Target architecture for the SDK to run on.
# Either "amd64" or "arm64"; defaults to "amd64" if not set.
#
# 6. SIGNER. Environment variable. Name of the owner of the artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNING_KEY environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 7. SIGNING_KEY. Environment variable. The artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored.
#
# OUTPUT:
#
# 1. SDK tarball (gentoo catalyst output) of the new SDK, pushed to buildcache.
# 2. Updated scripts repository
# - version tag w/ submodules
# - sdk_container/.repo/manifests/version.txt denotes new SDK version
# 3. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
# 4. If signer key was passed, signatures of artifacts from point 1, pushed along to buildcache.
# 5. DIGESTS of the artifacts from point 1, pushed to buildcache. If signer key was passed, armored ASCII files of the generated DIGESTS files too, pushed to buildcache.
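#
# Example invocation (hypothetical versions):
#   sdk_bootstrap 3005.0.0 3027.0.0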
function sdk_bootstrap() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_sdk_bootstrap_impl "${@}"
)
}
# --
function _sdk_bootstrap_impl() {
local seed_version="$1"
local version="$2"
local coreos_git="${3-}"
local portage_git="${4-}"
: ${ARCH:="amd64"}
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
check_version_string "${version}"
if [ -n "${coreos_git}" ] ; then
update_submodule "coreos-overlay" "${coreos_git}"
fi
if [ -n "${portage_git}" ] ; then
update_submodule "portage-stable" "${portage_git}"
fi
# Create new tag in scripts repo w/ updated versionfile + submodules.
# Also push the changes to the branch ONLY IF we're doing a nightly
# build of the 'main' branch AND we're definitely ON the main branch
# (`scripts` and submodules).
local push_branch="false"
if [[ "${version}" =~ ^main-[0-9.]+-nightly-[-0-9]+$ ]] \
&& [ "$(git rev-parse --abbrev-ref HEAD)" = "main" ] \
&& [ "$(git -C sdk_container/src/third_party/coreos-overlay/ rev-parse --abbrev-ref HEAD)" = "main" ] \
&& [ "$(git -C sdk_container/src/third_party/portage-stable/ rev-parse --abbrev-ref HEAD)" = "main" ] ; then
push_branch="true"
local existing_tag=""
existing_tag=$(git tag --points-at HEAD) # exit code is always 0, output may be empty
# If the found tag is a nightly tag, we stop this build if there are no changes
if [[ "${existing_tag}" =~ ^main-[0-9.]+-nightly-[-0-9]+$ ]]; then
local ret=0
git diff --exit-code "${existing_tag}" || ret=$?
if [ "$ret" = "0" ]; then
echo "Stopping build because there are no changes since tag ${existing_tag}" >&2
return 0
elif [ "$ret" = "1" ]; then
echo "Found changes since last tag ${existing_tag}" >&2
else
echo "Error: Unexpected git diff return code (${ret})" >&2
return 1
fi
fi
fi
local vernum="${version#*-}" # remove alpha-,beta-,stable-,lts- version tag
local git_vernum="${vernum}"
# Update FLATCAR_VERSION[_ID], BUILD_ID, and SDK in versionfile
(
source sdk_lib/sdk_container_common.sh
create_versionfile "${vernum}"
)
update_and_push_version "${version}" "${push_branch}"
./bootstrap_sdk_container -x ./ci-cleanup.sh "${seed_version}" "${vernum}"
# push SDK tarball to buildcache
# Get the Flatcar version number format (separator is '+' instead of '-',
# equal to $(strip_version_prefix "$version")).
source sdk_container/.repo/manifests/version.txt
local dest_tarball="flatcar-sdk-${ARCH}-${FLATCAR_SDK_VERSION}.tar.bz2"
# change the owner of the files and directories in __build__ back
# to ourselves, otherwise we could fail to sign the artifacts because
# we would lack write permissions in the directory of the signed
# artifact
local uid=$(id --user)
local gid=$(id --group)
sudo chown --recursive "${uid}:${gid}" __build__
(
cd "__build__/images/catalyst/builds/flatcar-sdk"
create_digests "${SIGNER}" "${dest_tarball}"
sign_artifacts "${SIGNER}" "${dest_tarball}"*
copy_to_buildcache "sdk/${ARCH}/${FLATCAR_SDK_VERSION}" "${dest_tarball}"*
)
}
# --

View File

@ -0,0 +1,79 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# sdk_container_build() should be called w/ the positional INPUT parameters below.
# SDK container builder automation stub.
# This script will build an SDK container w/ board support from an SDK tarball.
# NOTE that SDK tarball and scripts repo must have the same version or building
# the SDK container will fail.
#
# PREREQUISITES:
#
# 1. SDK version is recorded in sdk_container/.repo/manifests/version.txt and a matching
# SDK tarball is available on BUILDCACHE/sdk/[ARCH]/[VERSION]/flatcar-sdk-[ARCH]-[VERSION].tar.bz2
#
# OPTIONAL INPUT:
#
# 2. ARCH. Environment variable. Target architecture for the SDK to run on.
# Either "amd64" or "arm64"; defaults to "amd64" if not set.
#
# 3. SIGNER. Environment variable. Name of the owner of the artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNING_KEY environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 4. SIGNING_KEY. Environment variable. The artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored.
#
# OUTPUT:
#
# 1. SDK container image of the new SDK, published to buildcache.
# 2. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
# 3. If signer key was passed, signatures of artifacts from point 1, pushed along to buildcache.
# 4. DIGESTS of the artifacts from point 1, pushed to buildcache. If signer key was passed, armored ASCII files of the generated DIGESTS files too, pushed to buildcache.
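#
# Example invocation (hypothetical):
#   ARCH=arm64 sdk_container_build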
function sdk_container_build() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_sdk_container_build_impl "${@}"
)
}
# --
function _sdk_container_build_impl() {
: ${ARCH:="amd64"}
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
local vernum="${FLATCAR_SDK_VERSION}"
local sdk_tarball="flatcar-sdk-${ARCH}-${vernum}.tar.bz2"
# __build__ is in .dockerignore, so the tarball is excluded from the build context
mkdir -p __build__
copy_from_buildcache "sdk/${ARCH}/${vernum}/${sdk_tarball}" "./__build__"
# This will update the SDK_VERSION in versionfile
./build_sdk_container_image -x ./ci-cleanup.sh ./__build__/"${sdk_tarball}"
# push artifacts to build cache
local docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
docker_image_to_buildcache "${CONTAINER_REGISTRY}/flatcar-sdk-all" "${docker_vernum}"
docker_image_to_buildcache "${CONTAINER_REGISTRY}/flatcar-sdk-amd64" "${docker_vernum}"
docker_image_to_buildcache "${CONTAINER_REGISTRY}/flatcar-sdk-arm64" "${docker_vernum}"
}
# --

View File

@ -0,0 +1,308 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Helper script for extracting information from TAP files and for merging multiple
# TAP files into one report.
# The script uses a temporary SQLite DB for querying and for result generation.
#
# Brief usage overview (scroll down for parameters etc.):
# tap_ingest_tapfile - add test results from tap file to the DB
# tap_list_vendors - list all vendors TAP files have been ingested for
# tap_failed_tests_for_vendor - list all tests that never succeeded even once, per vendor
# tap_generate_report - generate a merged test report
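#
# Hypothetical session (file names and versions are examples):
#   tap_ingest_tapfile run1.tap qemu 1        # ingest first run
#   tap_ingest_tapfile run2.tap qemu 2        # ingest re-run
#   tap_failed_tests_for_vendor qemu          # tests that failed in every qemu run
#   tap_generate_report amd64 3227.0.0 false > merged.tap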
TAPFILE_HELPER_DBNAME="results.sqlite3"
# wrapper around sqlite3 w/ retries if DB is locked
function __sqlite3_wrapper() {
local dbfile="${TAPFILE_HELPER_DBNAME}"
local params=""
while [[ "$1" == -* ]] ; do
params="$params $1"
shift
done
while true; do
sqlite3 "${dbfile}" $params "PRAGMA foreign_keys = ON;$@"
local ret="$?"
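# sqlite3 exits with 5 (SQLITE_BUSY) when the database is locked by another process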
if [ "$ret" -ne 5 ] ; then
return $ret
fi
local sleep="$((1 + $RANDOM % 5))"
echo "Retrying in ${sleep} seconds." >&2
sleep "${sleep}"
done
}
# --
# Initialise the DB if it wasn't yet.
function __db_init() {
__sqlite3_wrapper '
CREATE TABLE IF NOT EXISTS "test_case" (
"id" INTEGER,
"name" TEXT UNIQUE,
PRIMARY KEY("id")
);
CREATE TABLE IF NOT EXISTS "vendor" (
"id" INTEGER,
"name" TEXT UNIQUE,
PRIMARY KEY("id")
);
CREATE TABLE IF NOT EXISTS "test_run" (
"id" INTEGER NOT NULL,
"result" INTEGER NOT NULL,
"output" TEXT,
"case_id" INTEGER NOT NULL,
"run" INTEGER NOT NULL,
"vendor_id" INTEGER,
PRIMARY KEY("id"),
FOREIGN KEY("case_id") REFERENCES "test_case"("id"),
FOREIGN KEY("vendor_id") REFERENCES "vendor"("id"),
UNIQUE (case_id, run, vendor_id)
);
'
}
# --
# Read tapfile into temporary DB.
# INPUT:
# 1: <tapfile> - tapfile to ingest
# 2: <vendor> - vendor (qemu, azure, aws, etc...)
# 3: <run> - re-run iteration
function tap_ingest_tapfile() {
local tapfile="${1}"
local vendor="${2}"
local run="${3}"
local result=""
local test_name=""
local in_error_message=false
if ! [ -f "${TAPFILE_HELPER_DBNAME}" ] ; then
__db_init
fi
# Wrap all SQL commands in a transaction to speed up INSERTs
# We commit intermediate batches whenever there is an error message to insert so that the
# error message file can be reused.
local SQL="BEGIN TRANSACTION;"
local has_error_message="false"
# run the parse loop in a subshell and clean up temporary error message file on exit
(
local error_message_file="$(mktemp)"
trap "rm -f '${error_message_file}'" EXIT
# Example TAP input:
# ok - coreos.auth.verify
# ok - coreos.locksmith.tls
# not ok - cl.filesystem
# ---
# Error: "--- FAIL: cl.filesystem/deadlinks (1.86s)\n files.go:90: Dead symbolic links found: [/var/lib/flatcar-oem-gce/usr/lib64/python3.9/site-packages/certifi-3021.3.16-py3.9.egg-info]"
# ...
# ok - cl.cloudinit.script
# ok - kubeadm.v1.22.0.flannel.base
while read -r line; do
if [[ "${line}" == "1.."* ]] ; then continue; fi
if [ "${line}" = "---" ] ; then # note: read removes leading whitespaces
in_error_message=true
continue
fi
if $in_error_message ; then
if [ "${line}" = "..." ] ; then
in_error_message=false
has_error_message="true"
else
# Remove special characters and unicode; the Jenkins TAP parser doesn't handle unicode.
echo -e "$line" \
| LC_ALL=C sed -e 's/^Error: "--- FAIL: /"/' -e 's/^[[:space:]]*//' \
-e "s/[>\\\"']/_/g" -e 's/[[:space:]]/ /g' \
-e 's/.\{200\}/&\n/g' \
-e 's/[^\x1F-\x7F]/?/g' \
>> "${error_message_file}"
continue
fi
else
test_name="$(echo "${line}" | sed 's/^[^-]* - //')"
local result_string
result_string="$(echo "${line}" | sed 's/ - .*//')"
result=0
if [ "${result_string}" = "ok" ] ; then
result=1
fi
fi
local test_output="/dev/null"
if [ "${has_error_message}" = "true" ] ; then
test_output="${error_message_file}"
fi
SQL="${SQL}INSERT OR IGNORE INTO test_case(name) VALUES ('${test_name}');"
SQL="${SQL}INSERT OR IGNORE INTO vendor(name) VALUES ('${vendor}');"
SQL="${SQL}INSERT OR REPLACE INTO test_run(run,result,output,case_id,vendor_id)
VALUES ('${run}','${result}', readfile('${test_output}'),
(SELECT id FROM test_case WHERE name='${test_name}'),
(SELECT id FROM vendor WHERE name='${vendor}'));"
if [ "${has_error_message}" = "true" ] ; then
SQL="${SQL}COMMIT;"
__sqlite3_wrapper "${SQL}"
truncate --size 0 "${error_message_file}"
has_error_message="false"
SQL="BEGIN TRANSACTION;"
fi
done < "$tapfile"
SQL="${SQL}COMMIT;"
__sqlite3_wrapper "${SQL}"
)
}
# --
# Print a list of all vendors we've seen so far.
function tap_list_vendors() {
__sqlite3_wrapper 'SELECT DISTINCT name from vendor;'
}
# --
# List tests that never succeeded for a given vendor.
# INPUT:
# 1: <vendor> - Vendor name to check for failed test runs
function tap_failed_tests_for_vendor() {
local vendor="$1"
__sqlite3_wrapper "
SELECT failed.name FROM test_case AS failed
WHERE EXISTS (
SELECT * FROM test_run AS t, vendor AS v, test_case AS c
WHERE t.vendor_id=v.id AND t.case_id=c.id
AND v.name='${vendor}'
AND c.name=failed.name
)
AND NOT exists (
SELECT * FROM test_run AS t, vendor AS v, test_case AS c
WHERE t.vendor_id=v.id AND t.case_id=c.id
AND v.name='${vendor}'
AND c.name=failed.name
AND t.result=1 );"
}
# --
# Print the tap file from contents of the database.
# INPUT:
# 1: <arch> - Architecture to be included in the first line of the report
# 2: <version> - OS version tested, to be included in the first line of the report
# 3: <include_transient_errors> - If set to "true" then debug output of transient test failures
# is included in the result report.
function tap_generate_report() {
local arch="$1"
local version="$2"
local full_error_report="${3:-false}"
local count
count="$(__sqlite3_wrapper 'SELECT count(name) FROM test_case;')"
local vendors
vendors="$(__sqlite3_wrapper 'SELECT name FROM vendor;' | tr '\n' ' ')"
echo "1..$((count+1))"
echo "ok - Version: ${version}, Architecture: ${arch}"
echo " ---"
echo " Platforms tested: ${vendors}"
echo " ..."
# Print result line for every test, including platforms it succeeded on
# and transient failed runs.
__sqlite3_wrapper 'SELECT DISTINCT name from test_case;' | \
while read -r test_name; do
# "ok" if the test succeeded at least once for all vendors that run the test,
# "not ok" otherwise.
local verdict
verdict="$(__sqlite3_wrapper "
SELECT failed.name FROM vendor AS failed
WHERE EXISTS (
SELECT * FROM test_run AS t, vendor AS v, test_case AS c
WHERE t.vendor_id=v.id AND t.case_id=c.id
AND v.name=failed.name
AND c.name='${test_name}'
)
AND NOT exists (
SELECT * FROM test_run AS t, vendor AS v, test_case AS c
WHERE t.vendor_id=v.id AND t.case_id=c.id
AND v.name=failed.name
AND c.name='${test_name}'
AND t.result=1 );
")"
if [ -n "${verdict}" ] ; then
verdict="not ok"
else
verdict="ok"
fi
# Generate a list of vendors and respective runs, in a single line.
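# Output is e.g. 'azure (1, 2); qemu (1)' for the given result code.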
function list_runs() {
local res="$1"
__sqlite3_wrapper -csv "
SELECT v.name, t.run FROM test_run AS t, vendor AS v, test_case AS c
WHERE t.vendor_id=v.id AND t.case_id=c.id
AND c.name='${test_name}'
AND t.result=${res}
ORDER BY v.name;" \
| awk -F, '{ if (t && (t != $1)) {
printf t " " r "); "
r="";}
t=$1
if (r)
r=r ", " $2
else
r="(" $2 ; }
END { if (t) print t r ")"; }'
}
local succeeded
succeeded="$(list_runs 1)"
local failed
failed="$(list_runs 0)"
echo "${verdict} - ${test_name}"
echo " ---"
if [ -n "${succeeded}" ] ; then
echo " Succeeded: ${succeeded}"
fi
if [ -n "${failed}" ] ; then
echo " Failed: ${failed}"
if [ "${verdict}" = "not ok" ] || [ "${full_error_report}" = "true" ] ; then
# generate diagnostic output, per failed run.
__sqlite3_wrapper -csv "
SELECT v.name, t.run
FROM test_run AS t, vendor AS v, test_case AS c
WHERE t.vendor_id=v.id AND t.case_id=c.id
AND c.name='${test_name}'
AND t.result=0
ORDER BY t.run DESC;" | \
sed 's/,/ /' | \
while read -r vendor run; do
echo " Error messages for ${vendor}, run ${run}:"
__sqlite3_wrapper -csv "
SELECT t.output FROM test_run AS t, test_case AS c
WHERE t.case_id=c.id
AND c.name='${test_name}'
AND t.run='${run}';" | \
sed 's/"/ /g' | \
awk '{print " L" NR ": \"" $0 "\""}'
done
fi
fi
echo " ..."
done
}
# --

252
ci-automation/test.sh Normal file
View File

@ -0,0 +1,252 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# test_run() should be called w/ the positional INPUT parameters below.
# Test scenarios runner stub.
# This script will run test scenarios for a single image type.
# Tests will be started inside the mantle container.
# This script is generic and will use a vendor-specific test runner from
# "ci-automation/vendor-testing/<image>.sh.
#
# PREREQUISITES:
#
# 1. SDK version and OS image version are recorded in sdk_container/.repo/manifests/version.txt
# 2. Scripts repo version tag of OS image version to be built is available and checked out.
# 3. Mantle container docker image reference is stored in sdk_container/.repo/manifests/mantle-container.
# 4. Vendor image and torcx docker tarball + manifest to run tests for are available on buildcache
# ( images/[ARCH]/[FLATCAR_VERSION]/ )
#
# INPUT:
#
# 1. Architecture (ARCH) of the TARGET vm images ("arm64", "amd64").
# 2. Image type to be tested. One of:
# ami, azure, azure_pro, digitalocean, gce, gce_pro, packet, qemu, qemu_uefi, vmware
#
# OPTIONAL INPUT:
#
# 3. List of tests / test patterns. Defaults to "*" (all tests).
# All positional arguments after the first 2 (see above) are tests / patterns of tests to run.
#
# MAX_RETRIES. Environment variable. Number of re-runs to overcome transient failures. Defaults to 20.
# PARALLEL_TESTS. Environment variable. Number of test cases to run in parallel.
# Default is image / vendor specific and defined in ci-automation/ci-config.env.
#
# OUTPUT:
#
# 1. Two merged TAP reports covering all test runs / vendors.
# - a "summary" report which contains error messages only for tests which never succeeded (per vendor).
# - a "detailed" report which also contains error messages of transient failures which succeeded after re-runs.
# These reports will be updated after each (re-)run of each vendor, making the test job safe
# to abort at any point - the previous runs' results won't be lost.
# 2. All intermediate kola tap reports, kola debug output, and merged tap reports (from 1.) published
# to buildcache at testing/[VERSION]/[ARCH]/[IMAGE]
# 3. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
#
#
# LOW-LEVEL / VENDOR SPECIFIC scripts API
#
# Vendor scripts are provided with their own sub-directory and are expected to cd into it before
# creating any artifacts (see vendor script argument 2 below).
# The torcx manifest is supplied in
# ../
# relative to the vendor sub-directory. The manifest is updated to include a URL pointing to the docker
# torcx tarball on the build cache (for the docker.torcx-manifest-pkgs test).
#
# Vendor specific scripts are called with the following positional arguments:
# 1 - Toplevel tests directory
# It contains some additional files needed for running the tests (like torcx manifest or file with channel information).
# 2 - Working directory for the tests.
# The vendor script is expected to keep all artifacts it produces in that directory.
# 3 - Architecture to test.
# 4 - Version number to test.
# 5 - Output TAP file.
# All following arguments specify test cases / test case patterns to run.
#
# The vendor tests should source ci-automation/vendor_test.sh script
# as a first step - it will do some common steps that the vendor
# script would need to make anyway. For more information, please refer
# to the vendor_test.sh file.
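#
# Example (illustrative; assumes the mantle container and the build
# cache are reachable from this host):
#
#   source ci-automation/test.sh
#   test_run amd64 qemu_uefi 'cl.basic' 'cl.internet'
#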
# Download torcx manifest and modify URLs pointing to the origin
# server to point to the build cache. This is because the tests for
# releases are run before artifacts are uploaded to the origin
# server. This would cause kola's docker.torcx-manifest-pkgs test to
# fail.
function __prepare_torcx() {
local arch="$1"
local vernum="$2"
local workdir="$3"
copy_from_buildcache "images/${arch}/${vernum}/torcx/torcx_manifest.json" "${workdir}"
# Change URLs from:
#
# https://${channel}.release.flatcar-linux.net/${arch}-usr/${vernum}/torcx/…
#
# to:
#
# https://bincache.flatcar-linux.net/images/${arch}/${vernum}/torcx/…
#
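# For example (illustrative values), with channel "alpha", arch
# "amd64" and version "3301.0.0":
#
# https://alpha.release.flatcar-linux.net/amd64-usr/3301.0.0/torcx/…
#
# becomes
#
# https://bincache.flatcar-linux.net/images/amd64/3301.0.0/torcx/…
#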
# This is done in two parts - replacing host part and arch part.
#
# Replace 'https://${channel}.release.flatcar-linux.net/' with
# 'https://bincache.flatcar-linux.net/' matching the initial "url"
# JSON key too.
local host_match='\("url":\s*"https://\)[a-z]\+\.release\([^/]\+/\)'
local host_replace='\1bincache\2'
# Replace the '${arch}-usr/' part with 'images/${arch}/'.
local arch_match='\([a-z0-9]\+\)-usr/'
local arch_replace='images/\3/'
sed \
-e "s#${host_match}${arch_match}#${host_replace}${arch_replace}#g" \
"${workdir}/torcx_manifest.json" \
>"${workdir}/torcx_manifest_new.json"
mv "${workdir}/torcx_manifest.json" "${workdir}/torcx_manifest.json.original"
mv "${workdir}/torcx_manifest_new.json" "${workdir}/torcx_manifest.json"
}
# --
function test_run() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_test_run_impl "${@}"
)
}
# --
function _test_run_impl() {
local arch="$1" ; shift
local image="$1"; shift
# default to all tests
if [ $# -le 0 ] ; then
set -- '*'
fi
local retries="${MAX_RETRIES:-20}"
local skip_copy_to_bincache=${SKIP_COPY_TO_BINCACHE:-0}
source ci-automation/tapfile_helper_lib.sh
source ci-automation/ci_automation_common.sh
source sdk_lib/sdk_container_common.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
local vernum="${FLATCAR_VERSION}"
local docker_vernum
docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
local work_dir="${TEST_WORK_DIR}"
local tests_dir="${work_dir}/${image}"
mkdir -p "${tests_dir}"
# Store git version and git channel as files inside ${work_dir}.
# This information might not be available inside the docker
# container if this directory is not a main git repo, but rather a
# git worktree.
get_git_version >"${work_dir}/git_version"
get_git_channel >"${work_dir}/git_channel"
local container_name="flatcar-tests-${arch}-${docker_vernum}-${image}"
local mantle_ref
mantle_ref=$(cat sdk_container/.repo/manifests/mantle-container)
# Make the torcx artifacts available to test implementation
__prepare_torcx "${arch}" "${vernum}" "${work_dir}"
local tap_merged_summary="results-${image}.tap"
local tap_merged_detailed="results-${image}-detailed.tap"
local retry=""
local success=false
local print_give_up=true
local failed_tests=()
# A job on each worker prunes old mantle images (docker image prune)
echo "docker rm -f '${container_name}'" >> ./ci-cleanup.sh
# Vendor tests may need to know if it is a first run or a rerun
touch "${work_dir}/first_run"
for retry in $(seq "${retries}"); do
local tapfile="results-run-${retry}.tap"
local failfile="failed-run-${retry}.txt"
# Ignore retcode since tests are flaky. We'll re-run failed tests and
# determine success based on test results (tapfile).
set +e
touch sdk_container/.env
docker run --pull always --rm --name="${container_name}" --privileged --net host -v /dev:/dev \
-w /work -v "$PWD":/work "${mantle_ref}" \
bash -c "set -o noglob && source sdk_container/.env && ci-automation/vendor-testing/${image}.sh \
\"${work_dir}\" \
\"${tests_dir}\" \
\"${arch}\" \
\"${vernum}\" \
\"${tapfile}\" \
$*"
set -e
rm -f "${work_dir}/first_run"
docker run --pull always --rm --name="${container_name}" --privileged --net host -v /dev:/dev \
-w /work -v "$PWD":/work "${mantle_ref}" \
ci-automation/test_update_reruns.sh \
"${arch}" "${vernum}" "${image}" "${retry}" \
"${tests_dir}/${tapfile}" \
"${tests_dir}/${failfile}" \
"${tap_merged_summary}" \
"${tap_merged_detailed}"
readarray -t failed_tests <"${tests_dir}/${failfile}"
if [ "${#failed_tests[@]}" -eq 0 ] ; then
echo "########### All tests succeeded. ###########"
success=true
print_give_up=false
break
fi
if retest_cycle_broken; then
echo "########### Test cycle requested to break ###########"
echo "Failed tests:"
printf '%s\n' "${failed_tests[@]}"
echo "-----------"
print_give_up=false
break
fi
echo "########### Some tests failed and will be re-run (${retry} / ${retries}). ###########"
echo "Failed tests:"
printf '%s\n' "${failed_tests[@]}"
echo "-----------"
set -- "${failed_tests[@]}"
done
if ${print_give_up}; then
echo "########### All re-runs exhausted ($retries). Giving up. ###########"
fi
if [ "${skip_copy_to_bincache}" -eq 0 ] ; then
# publish kola output, TAP files to build cache
copy_to_buildcache "testing/${vernum}/${arch}/${image}" \
"${tests_dir}/_kola_temp"
copy_to_buildcache "testing/${vernum}/${arch}/${image}" \
"${tests_dir}/"*.tap
copy_to_buildcache "testing/${vernum}/${arch}/${image}" \
"${tap_merged_summary}"
copy_to_buildcache "testing/${vernum}/${arch}/${image}" \
"${tap_merged_detailed}"
fi
if ! $success; then
return 1
fi
}
# --

View File

@ -0,0 +1,26 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Helper script for test.sh to update the test failures text file.
# test.sh uses this to determine which tests need to re-run.
# This script is run within the SDK container.
set -euo pipefail
arch="$1"
vernum="$2"
image="$3"
retry="$4"
tapfile="$5"
failfile="$6"
merged_summary="$7"
merged_detailed="$8"
source ci-automation/tapfile_helper_lib.sh
tap_ingest_tapfile "${tapfile}" "${image}" "${retry}"
tap_failed_tests_for_vendor "${image}" > "${failfile}"
tap_generate_report "${arch}" "${vernum}" > "${merged_summary}"
tap_generate_report "${arch}" "${vernum}" "true" > "${merged_detailed}"

View File

@ -0,0 +1,4 @@
FROM alpine
RUN apk add pigz
ENTRYPOINT [ "/usr/bin/pigz" ]
CMD [ "-h" ]

View File

@ -0,0 +1,73 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Helper for fetching a CI stage container image.
set -euo pipefail
function fetch_image_usage() {
local version="$1"
echo "Usage: fetch_image [-a <arch>] [-v <version>] <stage>."
echo "Fetch and docker load a container image of a CI build stage."
echo " <stage> - CI build stage to fetch:"
echo " sdk - fetch & install the plain SDK docker image. Note that this only works for the"
echo " 'main' branch since maintenance branches don't build an SDK."
echo " packages - fetch the packages (SDK + binary packages) container image."
echo " image - fetch the images (SDK + packages + image) container image."
echo " -v <version> - Custom version to fetch instead of branch version '${version}'"
echo " -a <arch> - OS image target architecture - 'arm64' or 'amd64'. Defaults to 'amd64'."
}
# --
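# Example (illustrative; assumes the build cache is reachable and the
# script is run from the directory containing it):
#   ./fetch_image.sh -a arm64 packages
# --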
function fetch_image() {
local stage
local arch="amd64"
local version="${3:-}"
local script_root="$(dirname "${BASH_SOURCE[0]}")/../.."
source "${script_root}/ci-automation/ci_automation_common.sh"
local vernum="$(source "${script_root}/sdk_container/.repo/manifests/version.txt";
echo "${FLATCAR_VERSION}")"
local docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
while [ 0 -lt $# ] ; do
case "$1" in
-h) fetch_image_usage "${vernum}"; exit 0;;
-v) docker_vernum="$2"; shift; shift;;
-a) arch="$2"; shift; shift;;
*) if [ -n "${stage:-}" ] ; then
echo "ERROR: Spurious positional argument(s): '$@'"
fetch_image_usage "${vernum}"
exit 1
fi
stage="$1"
shift;;
esac
done
local image
case "${stage}" in
sdk) image="flatcar-sdk-${arch}";;
packages) image="flatcar-packages-${arch}";;
image) image="flatcar-images-${arch}";;
*) echo "ERROR: unknown build stage '$1'"
fetch_image_usage "${docker_vernum}"
exit 1;;
esac
echo "Fetching '${image}:${docker_vernum}'. Depending on your connection this may take a while."
docker_image_from_buildcache "${image}" "${docker_vernum}"
echo "Done! Use"
echo " ./run_sdk_container -t -C ${image}:${docker_vernum}"
echo "to start."
}
# --
if [ "$(basename "$0")" = "fetch_image.sh" ] ; then
fetch_image "$@"
fi

View File

@ -0,0 +1,82 @@
#!/bin/bash
# Copyright (c) 2022 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the AWS vendor image.
# This script is supposed to run in the mantle container.
source ci-automation/vendor_test.sh
board="${CIA_ARCH}-usr"
escaped_vernum="${CIA_VERNUM//+/-}"
image_name="ci-${escaped_vernum}-${CIA_ARCH}"
aws_instance_type_var="AWS_${CIA_ARCH}_INSTANCE_TYPE"
aws_instance_type="${!aws_instance_type_var}"
more_aws_instance_types_var="AWS_${CIA_ARCH}_MORE_INSTANCE_TYPES"
set -o noglob # there shouldn't be any instance types with asterisks
# in them, but…
more_aws_instance_types=( ${!more_aws_instance_types_var} )
set +o noglob
vmdk='flatcar_production_ami_vmdk_image.vmdk'
tarball="${vmdk}.bz2"
if [[ "${AWS_AMI_ID}" == "" ]]; then
if [[ -f "${vmdk}" ]]; then
echo "++++ ${CIA_TESTSCRIPT}: using existing ${vmdk} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
else
echo "++++ ${CIA_TESTSCRIPT}: downloading ${tarball} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${tarball}" .
lbunzip2 "${tarball}"
fi
aws_bucket="flatcar-kola-ami-import-${AWS_REGION}"
aws_s3_path="s3://${aws_bucket}/${escaped_vernum}/${board}/"
trap 'ore -d aws delete --region="${AWS_REGION}" --board="${board}" --name="${image_name}" --ami-name="${image_name}" --file="${vmdk}" --bucket "${aws_s3_path}"' EXIT
ore aws initialize --region="${AWS_REGION}" --bucket "${aws_bucket}"
AWS_AMI_ID=$(ore aws upload --force --region="${AWS_REGION}" --board="${board}" --name="${image_name}" --ami-name="${image_name}" --ami-description="Flatcar Test ${image_name}" --file="${vmdk}" --bucket "${aws_s3_path}" | jq -r .HVM)
echo "++++ ${CIA_TESTSCRIPT}: created new AMI ${AWS_AMI_ID} (will be removed after testing) ++++"
fi
run_kola_tests() {
local instance_type="${1}"; shift
local instance_tapfile="${1}"; shift
timeout --signal=SIGQUIT 6h \
kola run \
--board="${board}" \
--basename="${image_name}" \
--channel="${CIA_CHANNEL}" \
--offering='basic' \
--parallel="${AWS_PARALLEL}" \
--platform=aws \
--aws-ami="${AWS_AMI_ID}" \
--aws-region="${AWS_REGION}" \
--aws-type="${instance_type}" \
--aws-iam-profile="${AWS_IAM_PROFILE}" \
--tapfile="${instance_tapfile}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
"${@}"
}
query_kola_tests() {
shift; # ignore the instance type
kola list --platform=aws --filter "${@}"
}
# these are set in ci-config.env
export AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY
run_kola_tests_on_instances \
"${aws_instance_type}" \
"${CIA_TAPFILE}" \
"${CIA_FIRST_RUN}" \
"${more_aws_instance_types[@]}" \
'--' \
'cl.internet' \
'--' \
"${@}"

View File

@ -0,0 +1,90 @@
#!/bin/bash
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the azure vendor image.
# This script is supposed to run in the mantle container.
source ci-automation/vendor_test.sh
# $@ now contains tests / test patterns to run
board="${CIA_ARCH}-usr"
basename="ci-${CIA_VERNUM//+/-}-${CIA_ARCH}"
azure_instance_type_var="AZURE_${CIA_ARCH}_MACHINE_SIZE"
azure_instance_type="${!azure_instance_type_var}"
azure_vnet_subnet_name="jenkins-vnet-${AZURE_LOCATION}"
azure_profile_config_file=''
secret_to_file azure_profile_config_file "${AZURE_PROFILE}"
azure_auth_config_file=''
secret_to_file azure_auth_config_file "${AZURE_AUTH_CREDENTIALS}"
# Fetch the Azure image if not present
if [ -f "${AZURE_IMAGE_NAME}" ] ; then
echo "++++ ${CIA_TESTSCRIPT}: Using existing ${AZURE_IMAGE_NAME} for testing ${CIA_VERNUM} (${CIA_ARCH}) ++++"
else
echo "++++ ${CIA_TESTSCRIPT}: downloading ${AZURE_IMAGE_NAME} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${AZURE_IMAGE_NAME}.bz2" .
cp --sparse=always <(lbzcat "${AZURE_IMAGE_NAME}.bz2") "${AZURE_IMAGE_NAME}"
rm "${AZURE_IMAGE_NAME}.bz2"
fi
if [[ "${CIA_ARCH}" == "arm64" ]]; then
AZURE_USE_GALLERY="--azure-use-gallery"
fi
run_kola_tests() {
local instance_type="${1}"; shift
local instance_tapfile="${1}"; shift
local hyperv_gen="V2"
if [ "${instance_type}" = "V1" ]; then
hyperv_gen="V1"
instance_type="${azure_instance_type}"
fi
# Align timeout with ore azure gc --duration parameter
timeout --signal=SIGQUIT 6h \
kola run \
--board="${board}" \
--basename="${basename}" \
--parallel="${AZURE_PARALLEL}" \
--offering=basic \
--platform=azure \
--azure-image-file="${AZURE_IMAGE_NAME}" \
--azure-location="${AZURE_LOCATION}" \
--azure-profile="${azure_profile_config_file}" \
--azure-auth="${azure_auth_config_file}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
--tapfile="${instance_tapfile}" \
--azure-size="${instance_type}" \
--azure-hyper-v-generation="${hyperv_gen}" \
${AZURE_USE_GALLERY} \
${azure_vnet_subnet_name:+--azure-vnet-subnet-name=${azure_vnet_subnet_name}} \
${AZURE_USE_PRIVATE_IPS:+--azure-use-private-ips=${AZURE_USE_PRIVATE_IPS}} \
"${@}"
}
query_kola_tests() {
shift; # ignore the instance type
kola list --platform=azure --filter "${@}"
}
other_instance_types=()
if [[ "${CIA_ARCH}" = 'amd64' ]]; then
other_instance_types+=('V1')
fi
run_kola_tests_on_instances \
"${azure_instance_type}" \
"${CIA_TAPFILE}" \
"${CIA_FIRST_RUN}" \
"${other_instance_types[@]}" \
'--' \
'cl.internet' \
'--' \
"${@}"

View File

@ -0,0 +1,57 @@
#!/bin/bash
# Copyright (c) 2022 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the Digital Ocean vendor image.
# This script is supposed to run in the mantle container.
source ci-automation/vendor_test.sh
# We never ran Digital Ocean on arm64, so for now fail it as an
# unsupported option.
if [[ "${CIA_ARCH}" == "arm64" ]]; then
echo "1..1" > "${CIA_TAPFILE}"
echo "not ok - all Digital Ocean tests" >> "${CIA_TAPFILE}"
echo " ---" >> "${CIA_TAPFILE}"
echo " ERROR: ARM64 tests not supported on Digital Ocean." | tee -a "${CIA_TAPFILE}"
echo " ..." >> "${CIA_TAPFILE}"
break_retest_cycle
exit 1
fi
image_name="ci-${CIA_VERNUM//+/-}"
image_url="$(url_from_template "${DIGITALOCEAN_IMAGE_URL_TEMPLATE}" "${CIA_ARCH}" "${CIA_CHANNEL}" 'https' "${CIA_VERNUM}")"
config_file=''
secret_to_file config_file "${DIGITALOCEAN_TOKEN_JSON}"
ore do create-image \
--config-file="${config_file}" \
--region="${DIGITALOCEAN_REGION}" \
--name="${image_name}" \
--url="${image_url}"
trap 'ore do delete-image \
--name="${image_name}" \
--config-file="${config_file}"' EXIT
set -x
timeout --signal=SIGQUIT 4h \
kola run \
--do-size="${DIGITALOCEAN_MACHINE_SIZE}" \
--do-region="${DIGITALOCEAN_REGION}" \
--basename="${image_name}" \
--do-config-file="${config_file}" \
--do-image="${image_name}" \
--parallel="${DIGITALOCEAN_PARALLEL}" \
--platform=do \
--channel="${CIA_CHANNEL}" \
--tapfile="${CIA_TAPFILE}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
"${@}"
set +x

View File

@ -0,0 +1,65 @@
#!/bin/bash
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the Equinix Metal vendor image.
# This script is supposed to run in the mantle container.
# This script requires the PXE images to be built.
source ci-automation/vendor_test.sh
# Equinix Metal ARM servers are not yet available hourly in the default `SV` metro
equinixmetal_metro_var="EQUINIXMETAL_${CIA_ARCH}_METRO"
equinixmetal_metro="${!equinixmetal_metro_var}"
EQUINIXMETAL_INSTANCE_TYPE_VAR="EQUINIXMETAL_${CIA_ARCH}_INSTANCE_TYPE"
EQUINIXMETAL_INSTANCE_TYPE="${!EQUINIXMETAL_INSTANCE_TYPE_VAR}"
MORE_INSTANCE_TYPES_VAR="EQUINIXMETAL_${CIA_ARCH}_MORE_INSTANCE_TYPES"
MORE_INSTANCE_TYPES=( ${!MORE_INSTANCE_TYPES_VAR} )
# The maximum is 6h coming from the ore GC duration parameter
timeout=6h
BASE_URL="http://${BUILDCACHE_SERVER}/images/${CIA_ARCH}/${CIA_VERNUM}"
run_kola_tests() {
local instance_type="${1}"; shift
local instance_tapfile="${1}"; shift
timeout --signal=SIGQUIT "${timeout}" \
kola run \
--board="${CIA_ARCH}-usr" \
--basename="ci-${CIA_VERNUM/+/-}-${CIA_ARCH}" \
--platform=equinixmetal \
--tapfile="${instance_tapfile}" \
--parallel="${EQUINIXMETAL_PARALLEL}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
--equinixmetal-image-url="${BASE_URL}/${EQUINIXMETAL_IMAGE_NAME}" \
--equinixmetal-installer-image-kernel-url="${BASE_URL}/${PXE_KERNEL_NAME}" \
--equinixmetal-installer-image-cpio-url="${BASE_URL}/${PXE_IMAGE_NAME}" \
--equinixmetal-metro="${equinixmetal_metro}" \
--equinixmetal-plan="${instance_type}" \
--equinixmetal-project="${EQUINIXMETAL_PROJECT}" \
--equinixmetal-storage-url="${EQUINIXMETAL_STORAGE_URL}" \
--gce-json-key=<(set +x; echo "${GCP_JSON_KEY}" | base64 --decode) \
--equinixmetal-api-key="${EQUINIXMETAL_KEY}" \
"${@}"
}
query_kola_tests() {
shift; # ignore the instance type
kola list --platform=equinixmetal --filter "${@}"
}
run_kola_tests_on_instances \
"${EQUINIXMETAL_INSTANCE_TYPE}" \
"${CIA_TAPFILE}" \
"${CIA_FIRST_RUN}" \
"${MORE_INSTANCE_TYPES[@]}" \
'--' \
'cl.internet' \
'--' \
"${@}"

View File

@ -0,0 +1,82 @@
#!/bin/bash
# Copyright (c) 2022 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the GCE vendor image.
# This script is supposed to run in the mantle container.
source ci-automation/vendor_test.sh
# We never run GCE on arm64, so for now fail it as an
# unsupported option.
if [[ "${CIA_ARCH}" == "arm64" ]]; then
echo "1..1" > "${CIA_TAPFILE}"
echo "not ok - all GCE tests" >> "${CIA_TAPFILE}"
echo " ---" >> "${CIA_TAPFILE}"
echo " ERROR: ARM64 tests not supported on GCE." | tee -a "${CIA_TAPFILE}"
echo " ..." >> "${CIA_TAPFILE}"
break_retest_cycle
exit 1
fi
GCP_JSON_KEY_PATH=''
secret_to_file GCP_JSON_KEY_PATH "${GCP_JSON_KEY}"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${GCE_IMAGE_NAME}" .
gcloud auth activate-service-account --key-file "${GCP_JSON_KEY_PATH}"
gsutil rm -r "${GCE_GCS_IMAGE_UPLOAD}/${CIA_ARCH}-usr/${CIA_VERNUM}" || true
gsutil cp "${GCE_IMAGE_NAME}" "${GCE_GCS_IMAGE_UPLOAD}/${CIA_ARCH}-usr/${CIA_VERNUM}/${GCE_IMAGE_NAME}"
family="ci"
image_name="${family}-${CIA_VERNUM//[+.]/-}"
ore gcloud delete-images --json-key="${GCP_JSON_KEY_PATH}" "${image_name}" || true
ore gcloud create-image \
--board="${CIA_ARCH}-usr" \
--family="${family}" \
--json-key="${GCP_JSON_KEY_PATH}" \
--source-root="${GCE_GCS_IMAGE_UPLOAD}" \
--source-name="${GCE_IMAGE_NAME}" \
--version="${CIA_VERNUM}"
trap 'ore gcloud delete-images \
--json-key="${GCP_JSON_KEY_PATH}" \
"${image_name}" ; gsutil rm -r "${GCE_GCS_IMAGE_UPLOAD}/${CIA_ARCH}-usr/${CIA_VERNUM}" || true' EXIT
run_kola_tests() {
local instance_type="${1}"; shift
local instance_tapfile="${1}"; shift
local extra_arg=()
if [ "${instance_type}" = "gvnic" ]; then
extra_arg+=("--gce-gvnic")
fi
timeout --signal=SIGQUIT 6h \
kola run \
--basename="${image_name}" \
--gce-image="${image_name}" \
--gce-json-key="${GCP_JSON_KEY_PATH}" \
--gce-machinetype="${GCE_MACHINE_TYPE}" \
"${extra_arg[@]}" \
--parallel="${GCE_PARALLEL}" \
--platform=gce \
--channel="${CIA_CHANNEL}" \
--tapfile="${instance_tapfile}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
"${@}"
}
query_kola_tests() {
shift; # ignore the instance type
kola list --platform=gce --filter "${@}"
}
run_kola_tests_on_instances \
"default" \
"${CIA_TAPFILE}" \
"${CIA_FIRST_RUN}" \
"gvnic" \
'--' \
'cl.internet' \
'--' \
"${@}"

View File

@ -0,0 +1,58 @@
#!/bin/bash
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the qemu vendor image.
# This script is supposed to run in the mantle container.
source ci-automation/vendor_test.sh
# ARM64 qemu tests only supported on UEFI
if [ "${CIA_ARCH}" = "arm64" ] && [ "${CIA_TESTSCRIPT}" != "qemu_uefi.sh" ] ; then
echo "1..1" > "${CIA_TAPFILE}"
echo "not ok - all qemu tests" >> "${CIA_TAPFILE}"
echo " ---" >> "${CIA_TAPFILE}"
echo " ERROR: ARM64 tests only supported on qemu_uefi." | tee -a "${CIA_TAPFILE}"
echo " ..." >> "${CIA_TAPFILE}"
break_retest_cycle
exit 1
fi
# Fetch image and BIOS if not present
if [ -f "${QEMU_IMAGE_NAME}" ] ; then
echo "++++ ${CIA_TESTSCRIPT}: Using existing ${QEMU_IMAGE_NAME} for testing ${CIA_VERNUM} (${CIA_ARCH}) ++++"
else
echo "++++ ${CIA_TESTSCRIPT}: downloading ${QEMU_IMAGE_NAME} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
rm -f "${QEMU_IMAGE_NAME}.bz2"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${QEMU_IMAGE_NAME}.bz2" .
lbunzip2 "${QEMU_IMAGE_NAME}.bz2"
fi
bios="${QEMU_BIOS}"
if [ "${CIA_TESTSCRIPT}" = "qemu_uefi.sh" ] ; then
bios="${QEMU_UEFI_BIOS}"
if [ -f "${bios}" ] ; then
echo "++++ ${CIA_TESTSCRIPT}: Using existing ${bios} ++++"
else
echo "++++ ${CIA_TESTSCRIPT}: downloading ${bios} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${bios}" .
fi
fi
set -x
kola run \
--board="${CIA_ARCH}-usr" \
--parallel="${QEMU_PARALLEL}" \
--platform=qemu \
--qemu-bios="${bios}" \
--qemu-image="${QEMU_IMAGE_NAME}" \
--tapfile="${CIA_TAPFILE}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
--qemu-skip-mangle \
"${@}"
set +x

View File

@ -0,0 +1 @@
qemu.sh

View File

@ -0,0 +1,64 @@
#!/bin/bash
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the update payload using the previous
# release as starting point, and doing a second update from the current
# build to itself again.
# This script is supposed to run in the mantle container.
source ci-automation/vendor_test.sh
if [ "$*" != "" ] && [ "$*" != "*" ] && [ "$*" != "cl.update.payload" ]; then
echo "1..1" > "${CIA_TAPFILE}"
echo "not ok - all qemu update tests" >> "${CIA_TAPFILE}"
echo " ---" >> "${CIA_TAPFILE}"
echo " ERROR: Only cl.update.payload is supported, got '$*'." | tee -a "${CIA_TAPFILE}"
echo " ..." >> "${CIA_TAPFILE}"
break_retest_cycle
exit 1
fi
mkdir -p tmp/
if [ -f tmp/flatcar_test_update.gz ] ; then
echo "++++ ${CIA_TESTSCRIPT}: Using existing ./tmp/flatcar_test_update.gz for testing ${CIA_VERNUM} (${CIA_ARCH}) ++++"
else
echo "++++ ${CIA_TESTSCRIPT}: downloading flatcar_test_update.gz for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/flatcar_test_update.gz" tmp/
fi
if [ -f tmp/flatcar_production_image_previous.bin ] ; then
echo "++++ ${CIA_TESTSCRIPT}: Using existing ./tmp/flatcar_production_image_previous.bin for testing update to ${CIA_VERNUM} (${CIA_ARCH}) from previous ${CIA_CHANNEL} ++++"
else
echo "++++ ${CIA_TESTSCRIPT}: downloading flatcar_production_image_previous.bin from previous ${CIA_CHANNEL} ++++"
rm -f tmp/flatcar_production_image_previous.bin.bz2
curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 "https://${CIA_CHANNEL}.release.flatcar-linux.net/${CIA_ARCH}-usr/current/flatcar_production_image.bin.bz2"
mv flatcar_production_image.bin.bz2 tmp/flatcar_production_image_previous.bin.bz2
lbunzip2 -k -f tmp/flatcar_production_image_previous.bin.bz2
fi
bios="${QEMU_BIOS}"
if [ "${CIA_ARCH}" = "arm64" ]; then
bios="${QEMU_UEFI_BIOS}"
if [ -f "${bios}" ] ; then
echo "++++ qemu_update.sh: Using existing ./${bios} ++++"
else
echo "++++ qemu_update.sh: downloading ${bios} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${bios}" .
fi
fi
kola run \
--board="${CIA_ARCH}-usr" \
--parallel="${QEMU_PARALLEL}" \
--platform=qemu \
--qemu-bios="${bios}" \
--qemu-image=tmp/flatcar_production_image_previous.bin \
--tapfile="${CIA_TAPFILE}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
--update-payload=tmp/flatcar_test_update.gz \
--qemu-skip-mangle \
cl.update.payload

View File

@ -0,0 +1,68 @@
#!/bin/bash
# Copyright (c) 2022 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
# Test execution script for the VMware ESX vendor image.
# This script is supposed to run in the mantle container.
source ci-automation/vendor_test.sh
# We never ran VMware ESX on arm64, so for now fail it as an
# unsupported option.
if [[ "${CIA_ARCH}" == "arm64" ]]; then
echo "1..1" > "${CIA_TAPFILE}"
echo "not ok - all qemu tests" >> "${CIA_TAPFILE}"
echo " ---" >> "${CIA_TAPFILE}"
echo " ERROR: ARM64 tests not supported on VMware ESX." | tee -a "${CIA_TAPFILE}"
echo " ..." >> "${CIA_TAPFILE}"
break_retest_cycle
exit 1
fi
# Fetch image if not present.
if [ -f "${VMWARE_ESX_IMAGE_NAME}" ] ; then
echo "++++ ${CIA_TESTSCRIPT}: Using existing ${work_dir}/${VMWARE_ESX_IMAGE_NAME} for testing ${CIA_VERNUM} (${CIA_ARCH}) ++++"
else
echo "++++ ${CIA_TESTSCRIPT}: downloading ${VMWARE_ESX_IMAGE_NAME} for ${CIA_VERNUM} (${CIA_ARCH}) ++++"
copy_from_buildcache "images/${CIA_ARCH}/${CIA_VERNUM}/${VMWARE_ESX_IMAGE_NAME}" .
fi
config_file=''
secret_to_file config_file "${VMWARE_ESX_CREDS}"
# If we are using static IPs, then delete every VM that is running
# because we'll use all available spots. This is to avoid entering a
# broken state if there are some left-over VMs from manual usage or a
# forcefully terminated job.
#
# The assumption here is that we can do it without any interference
# with other CI VMs, because we have acquired a resource lock on those
# VMs.
static_ips="$(jq '.["default"]["static_ips"]' "${config_file}")"
if [[ "${static_ips}" -ne 0 ]]; then
ore esx --esx-config-file "${config_file}" remove-vms || :
fi
kola_test_basename="ci-${CIA_VERNUM//+/-}"
trap 'ore esx --esx-config-file "${config_file}" remove-vms \
--pattern "${kola_test_basename}*" || :' EXIT
set -x
sudo timeout --signal=SIGQUIT 2h kola run \
--board="${CIA_ARCH}-usr" \
--basename="${kola_test_basename}" \
--channel="${CIA_CHANNEL}" \
--platform=esx \
--tapfile="${CIA_TAPFILE}" \
--parallel="${VMWARE_ESX_PARALLEL}" \
--torcx-manifest="${CIA_TORCX_MANIFEST}" \
--esx-config-file "${config_file}" \
--esx-ova-path "${VMWARE_ESX_IMAGE_NAME}" \
"${@}"
set +x

View File

@ -0,0 +1,391 @@
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Vendor test helper script. Sourced by vendor tests. Does some
# initial setup.
#
#
# The initial setup consist of creating the vendor working directory
# for the vendor test script, specifying the variables described below
# and changing the current working directory to the vendor working
# directory.
#
#
# The vendor test script is expected to keep all artifacts it produces
# in its current working directory.
#
#
# The script specifies the following variables for the vendor test
# script to use:
#
# CIA_VERNUM:
# Image version. In case of developer builds it comes with a suffix,
# so it looks like "3217.0.0+nightly-20220422-0155". For release
# builds the version will be without suffix, so it looks like
# "3217.0.0". Whether the build is a release or a developer one is
# reflected in CIA_BUILD_TYPE variable described below.
#
# CIA_ARCH:
# Architecture to test. Currently it is either "amd64" or "arm64".
#
# CIA_TAPFILE:
# Where the TAP reports should be written. Usually just passed to
# kola through the --tapfile parameter.
#
# CIA_CHANNEL:
# A channel. Either "alpha", "beta", "stable" or "lts". Used to find
# the last release for the update check.
#
# CIA_TESTSCRIPT:
# Name of the vendor script. May be useful in some messages.
#
# CIA_GIT_VERSION:
# The most recent tag for the current commit.
#
# CIA_BUILD_TYPE:
# It's either "release" or "developer", based on the CIA_VERNUM
# variable.
#
# CIA_TORCX_MANIFEST:
# Path to the Torcx manifest. Usually passed to kola through the
# --torcx-manifest parameter.
#
# CIA_FIRST_RUN:
# 1 if this is a first run, 0 if it is a rerun of failed tests.
#
#
# After this script is sourced, the parameters in ${@} specify test
# cases / test case patterns to run.
# "ciavts" stands for Continuous Integration Automation Vendor Test
# Setup. This prefix is used to easily unset all the variables with
# this prefix before leaving this file.
ciavts_main_work_dir="${1}"; shift
ciavts_work_dir="${1}"; shift
ciavts_arch="${1}"; shift
ciavts_vernum="${1}"; shift
ciavts_tapfile="${1}"; shift
# $@ now contains tests / test patterns to run
source ci-automation/ci_automation_common.sh
mkdir -p "${ciavts_work_dir}"
ciavts_testscript=$(basename "${0}")
ciavts_git_version=$(cat "${ciavts_main_work_dir}/git_version")
ciavts_channel=$(cat "${ciavts_main_work_dir}/git_channel")
if [[ "${ciavts_channel}" = 'developer' ]]; then
ciavts_channel='alpha'
fi
# If vernum is like 3200.0.0+whatever, it's a developer build,
# otherwise it's a release build.
ciavts_type='developer'
if [[ "${ciavts_vernum%%+*}" = "${ciavts_vernum}" ]]; then
ciavts_type='release'
fi
# Make these paths absolute to avoid problems when changing
# directories.
ciavts_tapfile="${PWD}/${ciavts_work_dir}/${ciavts_tapfile}"
ciavts_torcx_manifest="${PWD}/${ciavts_main_work_dir}/torcx_manifest.json"
ciavts_first_run=0
if [[ -f "${ciavts_main_work_dir}/first_run" ]]; then
ciavts_first_run=1
fi
echo "++++ Running ${ciavts_testscript} inside ${ciavts_work_dir} ++++"
cd "${ciavts_work_dir}"
CIA_VERNUM="${ciavts_vernum}"
CIA_ARCH="${ciavts_arch}"
CIA_TAPFILE="${ciavts_tapfile}"
CIA_CHANNEL="${ciavts_channel}"
CIA_TESTSCRIPT="${ciavts_testscript}"
CIA_GIT_VERSION="${ciavts_git_version}"
CIA_BUILD_TYPE="${ciavts_type}"
CIA_TORCX_MANIFEST="${ciavts_torcx_manifest}"
CIA_FIRST_RUN="${ciavts_first_run}"
# Unset all variables with ciavts_ prefix now.
unset -v "${!ciavts_@}"
# Prefixes all test names in the tap file with a given prefix, so the
# test name like "cl.basic" will become "extra-test.[${prefix}].cl.basic".
#
# Typical use:
# prefix_tap_file "${instance_type}" "${tapfile}"
#
# Parameters:
# 1 - prefix
# 2 - tap file, modified in place
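#
# Example (illustrative): with prefix "gvnic", a line
#   "ok - cl.basic"
# becomes
#   "ok - extra-test.[gvnic].cl.basic"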
function prefix_tap_file() {
local prefix="${1}"; shift
local tap_file="${1}"; shift
# build the actual prefix used to mark extra tests
local actual_prefix="extra-test.[${prefix}]."
sed --in-place --expression 's/^\(\s*\(not\)\?\s*ok[^-]*\s*-\s*\)\(\S\)/\1'"${actual_prefix}"'\3/g' "${tap_file}"
}
# Filters the test names, so it puts only the real names of the
# prefixed tests into the chosen variable. For example for prefix
# "foo", it will ignore the test name like "cl.basic", but will print
# "cl.internet" for a test name like "extra-test.[foo].cl.internet".
# "*" is treated specially - it will be inserted into the chosen
# variable if it is passed.
#
# Typical use:
# filter_prefixed_tests tests_to_run "${instance_type}" "${@}"
# if [[ "${#tests_to_run[@]}" -gt 0 ]]; then …; fi
#
# Parameters:
# 1 - name of an array variable where the filtering results will be stored
# 2 - prefix
# @ - test names
function filter_prefixed_tests() {
local var_name="${1}"; shift
local prefix="${1}"; shift
# rest of the parameters are test names
local -n results="${var_name}"
local name
local stripped_name
# clear the array, so it will contain results of current filtering
# only
results=()
for name; do
stripped_name="${name#extra-test.\[${prefix}\].}"
if [[ "${stripped_name}" != "${name}" ]]; then
results+=( "${stripped_name}" )
continue
elif [[ "${name}" = '*' ]]; then
results+=( '*' )
fi
done
}
# Filters out the extra tests from the passed test names. Ignored test
# names begin with "extra-test.". The results of the filtering are
# inserted into the chosen variable.
#
# Typical use:
# filter_out_prefixed_tests tests_to_run "${@}"
# if [[ "${#tests_to_run[@]}" -gt 0 ]]; then …; fi
#
# Parameters:
# 1 - name of an array variable where the filtering results will be stored
# @ - test names
function filter_out_prefixed_tests() {
local var_name="${1}"; shift
local -n results="${var_name}"
local name
# clear the array, so it will contain results of current filtering
# only
results=()
for name; do
if [[ "${name#extra-test.}" = "${name}" ]]; then
results+=( "${name}" )
fi
done
}
# Merges into the first (main) tap file the contents of other tap
# files. It is very simple - the function assumes that all the tap
# files begin with a line like:
#
# 1..${number_of_tests}
#
# Other lines that are processed should begin like:
#
# (not)? ok - ${test_name}
#
# Any other lines are copied verbatim.
#
# The other tap files should already be preprocessed by
# prefix_tap_file to avoid duplicated test names.
#
# Typical use:
# merge_tap_files "${tap_file}" extra-validation-*.tap
# rm -f extra-validation-*.tap
#
# Parameters:
# 1 - main tap file
# @ - other tap files
function merge_tap_files() {
local main_tap_file="${1}"; shift
# rest of the parameters are other tap files
local main_test_count=0
if [[ -f "${main_tap_file}" ]]; then
main_test_count=$(head --lines=1 "${main_tap_file}" | grep --only-matching '[0-9]\+$')
fi
local other_test_count
local other_tap_file
local tmp_tap_file="${main_tap_file}.mtf.tmp"
for other_tap_file; do
if [[ ! -f "${other_tap_file}" ]]; then
continue
fi
other_test_count=$(head --lines=1 "${other_tap_file}" | grep --only-matching '[0-9]\+$' || echo 0 )
((main_test_count+=other_test_count))
done
echo "1..${main_test_count}" >"${tmp_tap_file}"
if [[ -f "${main_tap_file}" ]]; then
tail --lines=+2 "${main_tap_file}" >>"${tmp_tap_file}"
fi
for other_tap_file; do
if [[ ! -f "${other_tap_file}" ]]; then
continue
fi
tail --lines=+2 "${other_tap_file}" >>"${tmp_tap_file}"
done
mv --force "${tmp_tap_file}" "${main_tap_file}"
}
# Runs or reruns the tests on the main instance and other
# instances. Other instances usually run a subset of tests only.
#
# For this function to work, the caller needs to define two functions
# beforehand:
#
# run_kola_tests that takes the following parameters:
# 1 - instance type
# 2 - tap file
# @ - tests to run
#
# query_kola_tests that takes the following parameters:
# 1 - instance type
# @ - tests to run
# This function should print the names of the tests to run. Every line
# of the output should have one test name to run. Any other cruft in
# the line will be ignored.
#
# Typical use:
# function run_kola_tests() {
# local instance_type="${1}"; shift
# local tap_file="${1}"; shift
# kola run … "${@}"
# }
#
# function query_kola_tests() {
# local instance_type="${1}"; shift
# kola list … "${@}"
# }
#
# args=(
# "${main_instance}"
# "${CIA_TAPFILE}"
# "${CIA_FIRST_RUN}"
# "${other_instance_types[@]}"
# '--'
# 'cl.internet'
# '--'
# "${tests_to_run[@]}"
# )
# run_kola_tests_on_instances "${args[@]}"
#
# Parameters:
# 1 - main instance type - there all the tests are being run
# 2 - main tap file
# 3 - if this is first run (1 if it is, 0 if it is a rerun)
# @ - other instance types followed by double dash (--) followed by
# test names for other instances to filter from the tests to be
# run followed by double dash, followed by tests to be run or
# rerun
function run_kola_tests_on_instances() {
local main_instance_type="${1}"; shift
local main_tapfile="${1}"; shift
local is_first_run="${1}"; shift
local other_instance_types=()
local other_tests=()
local arg
while [[ "${#}" -gt 0 ]]; do
arg="${1}"; shift
if [[ "${arg}" = '--' ]]; then
break
fi
other_instance_types+=( "${arg}" )
done
while [[ "${#}" -gt 0 ]]; do
arg="${1}"; shift
if [[ "${arg}" = '--' ]]; then
break
fi
other_tests+=( "${arg}" )
done
# rest of the parameters are tests to be run or rerun
local instance_type
local queried_tests
local instance_tests=()
local tests_on_instances_running=0
local other_tests_for_fgrep
other_tests_for_fgrep="$(printf '%s\n' "${other_tests[@]}")"
for instance_type in "${other_instance_types[@]}"; do
# On first run we usually pass the canonical test names like
# cl.basic, cl.internet or *, so we decide which tests should
# be run on the other instances based on this list. On the
# other hand, the rerun will contain names of the failed tests
# only, and those are specific - if a test failed on the main
# instance, the name of the test will be like cl.basic; if a
# test failed on other instance, the name of the test will be
# like extra-test.[…].cl.basic. So in case of reruns, we want
# to filter the extra tests first then we decide which tests
# should be run.
if [[ "${is_first_run}" -eq 1 ]]; then
set -o noglob # noglob should not be necessary, as
# query_kola_tests shouldn't return a
# wildcard, but better to be safe than sorry
queried_tests="$(query_kola_tests "${instance_type}" "${@}")"
instance_tests=( $(grep --only-matching --fixed-strings "${other_tests_for_fgrep}" <<<"${queried_tests}" || :) )
set +o noglob
else
filter_prefixed_tests instance_tests "${instance_type}" "${@}"
fi
if [[ "${#instance_tests[@]}" -gt 0 ]]; then
tests_on_instances_running=1
(
local instance_tapfile="instance_${instance_type}_validate.tap"
set +e
set -x
local output
output=$(run_kola_tests "${instance_type}" "${instance_tapfile}" "${instance_tests[@]}" 2>&1)
set +x
set -e
local escaped_instance_type
escaped_instance_type="$(sed -e 's/[\/&]/\\&/g' <<<"${instance_type}")"
printf "=== START ${instance_type} ===\n%s\n=== END ${instance_type} ===\n" "$(sed -e "s/^/${escaped_instance_type}: /g" <<<"${output}")"
prefix_tap_file "${instance_type}" "${instance_tapfile}"
) &
fi
done
local main_tests=()
filter_out_prefixed_tests main_tests "${@}"
if [[ "${#main_tests[@]}" -gt 0 ]]; then
# run in a subshell, so the set -x and set +e do not pollute
# the outer environment
(
set +e
set -x
run_kola_tests "${main_instance_type}" "${main_tapfile}" "${main_tests[@]}"
true
)
fi
if [[ "${tests_on_instances_running}" -eq 1 ]]; then
wait
merge_tap_files "${main_tapfile}" 'instance_'*'_validate.tap'
rm -f 'instance_'*'_validate.tap'
fi
}

139
ci-automation/vms.sh Normal file
View File

@ -0,0 +1,139 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# >>> This file is supposed to be SOURCED from the repository ROOT. <<<
#
# vm_build() should be called w/ the positional INPUT parameters below.
# Vendor images build automation stub.
# This script will build one or more vendor images ("vm") using a pre-built packages container.
#
# PREREQUISITES:
#
# 1. SDK version and OS image version are recorded in sdk_container/.repo/manifests/version.txt
# 2. Scripts repo version tag of OS image version to be built is available and checked out.
# 3. Flatcar packages container is available via build cache server
# from "/containers/[VERSION]/flatcar-images-[ARCH]-[FLATCAR_VERSION].tar.gz"
# or present locally. Must contain packages.
# 4. The generic Flatcar image must be present in build cache server.
#
# INPUT:
#
# 1. Architecture (ARCH) of the TARGET vm images ("arm64", "amd64").
# 2. Image formats to be built. Can be multiple, separated by spaces.
# Run ./image_to_vm.sh -h in the SDK to get a list of supported images.
#
# OPTIONAL INPUT:
#
# 1. SIGNER. Environment variable. Name of the owner of the artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNING_KEY environment variable should also be provided, otherwise this environment variable will be ignored.
#
# 2. SIGNING_KEY. Environment variable. The artifact signing key.
# Defaults to nothing if not set - in such case, artifacts will not be signed.
# If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored.
#
# OUTPUT:
#
# 1. Exported VM image(s), pushed to buildcache ( images/[ARCH]/[FLATCAR_VERSION]/ )
# 2. "./ci-cleanup.sh" with commands to clean up temporary build resources,
# to be run after this step finishes / when this step is aborted.
# 3. If signer key was passed, signatures of artifacts from point 1, pushed along to buildcache.
# 4. DIGESTS of the artifacts from point 1, pushed to buildcache. If signer key was passed, armored ASCII files of the generated DIGESTS files too, pushed to buildcache.
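#
# Example (illustrative; assumes the prerequisites above are met):
#
#   source ci-automation/vms.sh
#   vm_build arm64 qemu_uefi equinix_metal
#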
function vm_build() {
# Run a subshell, so the traps, environment changes and global
# variables are not spilled into the caller.
(
set -euo pipefail
_vm_build_impl "${@}"
)
}
# --
function _vm_build_impl() {
local arch="$1"
shift
# $@ now contains image formats to build
source ci-automation/ci_automation_common.sh
source ci-automation/gpg_setup.sh
init_submodules
source sdk_container/.repo/manifests/version.txt
local vernum="${FLATCAR_VERSION}"
local docker_vernum="$(vernum_to_docker_image_version "${vernum}")"
local packages="flatcar-packages-${arch}"
local packages_image="${packages}:${docker_vernum}"
docker_image_from_buildcache "${packages}" "${docker_vernum}"
local vms="flatcar-vms-${arch}"
local vms_container="${vms}-${docker_vernum}"
# automatically add PXE to formats if we build for Equinix Metal (packet).
local has_packet=0
local has_pxe=0
for format; do
[[ "${format}" = 'packet' ]] || [[ "${format}" = 'equinix_metal' ]] && has_packet=1
[[ "${format}" = 'pxe' ]] && has_pxe=1
done
[[ ${has_packet} -eq 1 ]] && [[ ${has_pxe} -eq 0 ]] && set -- 'pxe' "${@}"
# Convert platform names (also used to find the test scripts) to image formats they entail
formats="$*"
if echo "$formats" | tr ' ' '\n' | grep -q '^vmware'; then
formats=$(echo "$formats" | tr ' ' '\n' | sed '/vmware.*/d')
formats+=" vmware vmware_insecure vmware_ova vmware_raw"
fi
if echo "$formats" | tr ' ' '\n' | grep -q -P '^(ami|aws)'; then
formats=$(echo "$formats" | tr ' ' '\n' | sed '/ami.*/d' | sed '/aws/d')
formats+=" ami ami_vmdk"
fi
# Keep compatibility with SDK scripts where "equinix_metal" remains unknown.
formats=$(echo "$formats" | tr ' ' '\n' | sed 's/equinix_metal/packet/g')
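# e.g. (illustrative): a "vmware_ova equinix_metal" input has now been
# expanded to: pxe packet vmware vmware_insecure vmware_ova vmware_raw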
local images_in="images-in/"
rm -rf "${images_in}"
copy_dir_from_buildcache "images/${arch}/${vernum}/" "${images_in}"
lbunzip2 "${images_in}/flatcar_production_image.bin.bz2"
./run_sdk_container -x ./ci-cleanup.sh -n "${vms_container}" -C "${packages_image}" \
-v "${vernum}" \
mkdir -p "${CONTAINER_IMAGE_ROOT}/${arch}-usr/latest"
./run_sdk_container -n "${vms_container}" -C "${packages_image}" \
-v "${vernum}" \
mv "${images_in}" "${CONTAINER_IMAGE_ROOT}/${arch}-usr/latest-input"
for format in ${formats}; do
echo " ################### VENDOR '${format}' ################### "
COMPRESSION_FORMAT="bz2"
if [[ "${format}" =~ ^(openstack|openstack_mini|digitalocean)$ ]];then
COMPRESSION_FORMAT="gz,bz2"
fi
./run_sdk_container -n "${vms_container}" -C "${packages_image}" \
-v "${vernum}" \
./image_to_vm.sh --format "${format}" --board="${arch}-usr" \
--from "${CONTAINER_IMAGE_ROOT}/${arch}-usr/latest-input" \
--to "${CONTAINER_IMAGE_ROOT}/${arch}-usr/latest" \
--image_compression_formats="${COMPRESSION_FORMAT}" \
--only_store_compressed
done
# copy resulting images + push to buildcache
local images_out="images/"
rm -rf "${images_out}"
./run_sdk_container -n "${vms_container}" -C "${packages_image}" \
-v "${vernum}" \
mv "${CONTAINER_IMAGE_ROOT}/${arch}-usr/" "./${images_out}/"
create_digests "${SIGNER}" "images/latest/"*
sign_artifacts "${SIGNER}" "images/latest/"*
copy_to_buildcache "images/${arch}/${vernum}/" "images/latest/"*
}
# --

40
retag-for-jenkins Executable file
View File

@ -0,0 +1,40 @@
#!/bin/bash
set -euo pipefail
if [ $# -lt 1 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
echo "Usage: $0 TAG"
echo "Rebases the free-standing git tag <TAG> on the current branch"
echo "(make sure you don't have any uncommited local changes)"
echo "E.g., after a Jenkins build 'alpha-9999.99.99-mytest' was started based on"
echo "the scripts branch 'mybranch' and you have new changes on the branch,"
echo "check out your branch and run"
echo " $0 alpha-9999.99.99-mytest"
echo
echo "This is required when testing 'scripts' changes with Jenkins and a leaf job fails,"
echo "so that instead of restarting the whole build from the 'packages' job, you can"
echo "restart the leaf job after retagging. Note: Just starting a leaf job with your"
echo "branch as reference is not valid because it would overwrite the nightly build"
echo "artifacts!"
echo
echo "TODO: Add feature to update coreos-overlay/portage-stable submodule refs"
exit 1
fi
TAG="$1"
BRANCH=$(git rev-parse --abbrev-ref HEAD)
git fetch --force --tags origin
BUILD_PATCH=$(git format-patch --output=/dev/stdout "${TAG}~1..${TAG}")
git checkout --recurse-submodules "${TAG}"
git reset --hard "${BRANCH}"
echo "${BUILD_PATCH}" | git am -3 || {
git checkout "${TAG}" -- sdk_container/.repo/manifests/version.txt
git add sdk_container/.repo/manifests/version.txt
git am --continue
# This does not handle submodule conflicts: It should use the one
# from the TAG (similar to version.txt) unless an explicit new
# reference was specified
} || { echo "Failed to resolve conflict, continue manually" >&2 ; exit 1 ; }
git tag -d "${TAG}"
git tag "${TAG}"
git push --force origin "${TAG}"

150
run_sdk_container Executable file
View File

@ -0,0 +1,150 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -euo pipefail
cd $(dirname "$0")
source sdk_lib/sdk_container_common.sh
arch="all"
name=""
os_version="$(get_git_version)"
sdk_version="$(get_sdk_version_from_versionfile)"
custom_image=""
tty=""
remove=""
cleanup=""
usage() {
echo " Usage:"
echo " $0 [-t] [-v <version>] [-V sdk version] [-a arch] [-n <name> ] [-x <script>] [-C custom-container] [--rm] [container-command]"
echo " Start an SDK container of a given SDK release version."
echo " This will create the container if it does not exist, otherwise start the existing container."
echo " If the container is already running then it will exec into the container."
echo
echo " container-command - command to be run in the container instead of"
echo " an interactive shell."
echo " -t Attach docker to a TTY (docker -t)"
echo " -v <version> - Sourcetree (OS image) version to use."
echo " Defaults to '$os_version' (current git commit)."
echo " FLATCAR_VERSION[_ID] in '$sdk_container_common_versionfile'"
echo " will be updated accordingly."
echo " -V <SDK ver> - SDK version to use. Defaults to '${sdk_version}'"
echo " (FLATCAR_SDK_VERSION from '$sdk_container_common_versionfile')."
echo " -a <amd64|arm64|all> - Target architecture (board support) of the SDK."
echo " 'all' (the default) contains support for both amd64 and arm64."
echo " -n <name> - Custom name to use for the container."
echo " --rm Remove container afterwards"
echo " -x <script> - For each resource generated during build (container etc.)"
echo " add a cleanup line to <script> which, when run, will free"
echo " the resource. Useful for CI."
echo " -C - Use an entirely custom container image instead of the SDK's"
echo " $sdk_container_common_registry/flatcar-sdk-[ARCH]:[SDK VERSION]"
echo " Useful for CI."
echo " -h Print this help."
echo
}
# --
while [ 0 -lt $# ] ; do
case "$1" in
-h) usage; exit 0;;
--help) usage; exit 0;;
-t) tty="-t"; shift;;
-v) os_version="$2"; shift; shift;;
-V) sdk_version="$2"; shift; shift;;
-a) arch="$2"; shift; shift;;
-n) name="$2"; shift; shift;;
--rm) remove=true; shift;;
-x) cleanup="$2"; shift; shift;;
-C) custom_image="$2"; shift; shift;;
*) break;;
esac
done
if [ -n "$custom_image" ] ; then
container_image_name="${custom_image}"
else
docker_sdk_vernum="$(vernum_to_docker_image_version "${sdk_version}")"
container_image_name="$sdk_container_common_registry/flatcar-sdk-${arch}:${docker_sdk_vernum}"
fi
create_versionfile "$sdk_version" "$os_version"
if [ -z "$name" ] ; then
docker_sdk_vernum="$(vernum_to_docker_image_version "${sdk_version}")"
docker_os_vernum="$(vernum_to_docker_image_version "${os_version}")"
name="flatcar-sdk-${arch}-${docker_sdk_vernum}_os-${docker_os_vernum}"
fi
filter="^/"
if "${is_podman}"; then
filter=""
fi
stat="$($docker ps --all --no-trunc --filter name="${filter}$name\$" --format '{{.Status}}'\
| cut -f1 -d' ')"
# pass SDK related environment variables and gcloud auth
# into container
setup_sdk_env
setup_gsutil
mkdir -p "__build__/images"
mkdir -p "sdk_container/.cache/sdks"
hostname="${name:0:63}"
hostname="${hostname//./_}"
if [ -n "$cleanup" ] ; then
echo "$docker container rm -f '${name}'" >> "$cleanup"
fi
if [ -z "$stat" ] ; then
yell "Creating a new container '$name'"
gpg_volumes=$(gnupg_ssh_gcloud_mount_opts)
if [ -z "$custom_image" ]; then
(
source ci-automation/ci_automation_common.sh
docker_image_from_registry_or_buildcache "flatcar-sdk-${arch}" "${docker_sdk_vernum}"
)
fi
$docker create $tty -i \
-v /dev:/dev \
-v "$(pwd)/sdk_container:/mnt/host/source/" \
-v "$(pwd)/sdk_container/git-override/.git-coreos-overlay:/mnt/host/source/src/third_party/coreos-overlay/.git" \
-v "$(pwd)/sdk_container/git-override/.git-portage-stable:/mnt/host/source/src/third_party/portage-stable/.git" \
-v "$(pwd)/__build__/images:/mnt/host/source/src/build" \
-v "$(pwd):/mnt/host/source/src/scripts" \
$gpg_volumes \
--privileged \
--network host \
-e SDK_USER_ID="$(id -u)" \
-e SDK_GROUP_ID="$(id -g)" \
--name="$name" \
--hostname="$hostname" \
--entrypoint /bin/bash \
"${container_image_name}" -l
fi
if [ "$stat" != "Up" ] ; then
yell "Starting stopped container '$name'"
if [ "${remove}" = "true" ]; then
remove_command="$docker rm -f $name"
else
remove_command=":"
fi
trap "$docker stop -t 0 $name ; ${remove_command}" EXIT
$docker start "$name"
fi
# Workaround: The SDK expects to be able to write to /etc/hosts
$docker exec "$name" sh -c 'cp /etc/hosts /etc/hosts2; umount /etc/hosts ; mv /etc/hosts2 /etc/hosts'
$docker exec $tty -i "$name" /mnt/host/source/src/scripts/sdk_lib/sdk_entry.sh "$@"

View File

@ -0,0 +1 @@
ghcr.io/flatcar-linux/mantle:git-11159768f92f7a16ffa8681fbe3998aaff60a79b

View File

@ -0,0 +1,6 @@
NAME="Flatcar Container Linux by Kinvolk"
ID=flatcar
ID_LIKE=coreos
ANSI_COLOR="38;5;75"
HOME_URL="https://flatcar.org/"
BUG_REPORT_URL="https://issues.flatcar.org"

View File

@ -0,0 +1,4 @@
FLATCAR_VERSION=3301.0.0+nightly-20220715-2100
FLATCAR_VERSION_ID=3301.0.0
FLATCAR_BUILD_ID="nightly-20220715-2100"
FLATCAR_SDK_VERSION=3301.0.0+nightly-20220715-2100

View File

@ -0,0 +1 @@
gitdir: ../../scripts/.git/modules/sdk_container/src/third_party/coreos-overlay

View File

@ -0,0 +1 @@
gitdir: ../../scripts/.git/modules/sdk_container/src/third_party/portage-stable

View File

@ -0,0 +1,5 @@
# GIT overrides for submodules
In the SDK container, the scripts root is at a different path relative to the submodules.
The overrides in this directory are mounted into `coreos-overlay/.git` and `portage-stable/.git` so the submodules can find their parent repository.

@ -0,0 +1 @@
Subproject commit 159b0048db19748bc624850e4c0cda0167951544

@ -0,0 +1 @@
Subproject commit f54be57551aa95b419727d010b053dfa29fb1ad1

9
sdk_lib/90_env_keep Normal file
View File

@ -0,0 +1,9 @@
Defaults env_keep += "FLATCAR_BUILD_ID COREOS_OFFICIAL \
EMAIL GIT_AUTHOR_EMAIL GIT_AUTHOR_NAME \
GIT_COMMITTER_EMAIL GIT_COMMITTER_NAME \
GIT_PROXY_COMMAND GIT_SSH RSYNC_PROXY \
GNUPGHOME GPG_AGENT_INFO SSH_AUTH_SOCK \
BOTO_PATH GOOGLE_APPLICATION_CREDENTIALS \
USE FEATURES PORTAGE_USERNAME FORCE_STAGES \
SIGNER \
all_proxy ftp_proxy http_proxy https_proxy no_proxy"

View File

@ -0,0 +1,29 @@
ARG VERSION
FROM flatcar-sdk-build:${VERSION} as meta
ARG RMARCH
ARG RMCROSS
RUN if [ -n "$RMCROSS" ]; then \
sudo crossdev --clean --force "$RMCROSS"; \
fi
RUN if [ -n "$RMARCH" ]; then \
sudo rm -rf /build/$RMARCH; \
sudo rm -f /usr/local/bin/*-$RMARCH; \
fi
# Note: .repo/manifests/version.txt will survive this. That's intended.
RUN sudo rm -rf /mnt/host/source/*
FROM scratch
COPY --from=meta / /
COPY --from=meta --chown=sdk:sdk /home/sdk /home/sdk
RUN chown -R sdk:sdk /mnt/host/source
# This is not used when starting the container via ./run_sdk_container
# but it's useful for standalone container use.
RUN mkdir -p /mnt/host/source/src/scripts
COPY --chown=sdk:sdk sdk_lib/sdk_init_selfcontained.sh /mnt/host/source/src/
ENTRYPOINT /home/sdk/sdk_entry.sh

View File

@ -0,0 +1,17 @@
ARG VERSION
FROM flatcar-sdk-import:${VERSION}
ARG BINHOST
ARG OFFICIAL=0
# mark build as official where appropriate
RUN echo "export COREOS_OFFICIAL=$OFFICIAL" > /mnt/host/source/.env
RUN /home/sdk/sdk_entry.sh ./setup_board --board="arm64-usr" --binhost="${BINHOST}/arm64-usr"
RUN /home/sdk/sdk_entry.sh ./setup_board --board="arm64-usr" --regen_configs
RUN /home/sdk/sdk_entry.sh ./setup_board --board="amd64-usr" --binhost="${BINHOST}/amd64-usr"
RUN /home/sdk/sdk_entry.sh ./setup_board --board="amd64-usr" --regen_configs
RUN rm /mnt/host/source/.env
RUN rm -rf /home/sdk/toolchain-pkgs
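A hypothetical build of this stage (the Dockerfile name and image tag are guesses; the binhost URL borrows the dev bincache from settings.env further below):

    docker build -t flatcar-sdk-boards:3027.0.0 \
        --build-arg VERSION=3027.0.0 \
        --build-arg BINHOST="https://bincache.flatcar-linux.net/boards" \
        --build-arg OFFICIAL=0 \
        -f sdk_lib/Dockerfile.boards .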


@ -0,0 +1,58 @@
ARG VERSION
FROM flatcar-sdk-tarball:${VERSION}
# Make build scripts believe we're in the SDK chroot (which, technically, we are)
RUN touch /etc/debian_chroot
RUN chmod 644 /etc/passwd
RUN chmod 644 /etc/group
# User "root" is not in /etc/passwd / group in the SDK tarball
RUN echo 'root:x:0:0:root:/root:/bin/bash' >>/etc/passwd
RUN echo 'root:x:0:' >>/etc/group
RUN if ! grep -q portage /etc/group ; then \
echo "portage::250:portage" >>/etc/group; \
fi
RUN if ! grep -q portage /etc/passwd; then \
echo "portage:x:250:250:portage:/var/tmp/portage:/bin/false" >>/etc/passwd; \
fi
# fix "Unable to unshare: EPERM ..." in containers
# (see https://github.com/gentoo/gentoo-docker-images/issues/81)
RUN echo 'export FEATURES="-ipc-sandbox -network-sandbox -pid-sandbox"' \
>> /etc/skel/.bashrc
RUN groupadd sdk
RUN useradd -g sdk -G portage sdk
RUN echo "sdk ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/sdk-user
RUN rm -rf /mnt/host/source
COPY --chown=sdk:sdk sdk_container/ /mnt/host/source
COPY --chown=sdk:sdk . /mnt/host/source/src/scripts
RUN chown sdk:sdk /mnt/host/source
COPY sdk_lib/90_env_keep /etc/sudoers.d/90_env_keep
RUN chmod 0440 /etc/sudoers.d/90_env_keep
USER sdk:sdk
RUN mkdir -p /mnt/host/source/src/scripts /mnt/host/source/src/build
RUN ln -s /mnt/host/source /home/sdk/trunk
RUN rm /home/sdk/.bashrc
RUN cp /etc/skel/.bashrc /home/sdk
RUN echo "cd /home/sdk/trunk/src/scripts" >> /home/sdk/.bashrc
RUN echo 'export PATH="$PATH:/usr/local/bin:/usr/local/sbin"' >> /home/sdk/.bashrc
# Pass user and SDK environment variables through into the container
RUN echo "if [ -f /mnt/host/source/.env ]; then source /mnt/host/source/.env; fi" >> /home/sdk/.bashrc
RUN echo "if [ -f /mnt/host/source/.sdkenv ]; then source /mnt/host/source/.sdkenv; fi" >> /home/sdk/.bashrc
COPY --chown=sdk:sdk sdk_lib/sdk_entry.sh /home/sdk
RUN chmod 755 /home/sdk/sdk_entry.sh
USER root:root
# This should be a NOP; if you see packages being rebuilt
# it's likely that the submodules and the SDK tarball are out of sync
RUN /home/sdk/sdk_entry.sh ./update_chroot --toolchain_boards="amd64-usr arm64-usr"
ENTRYPOINT /home/sdk/sdk_entry.sh
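Thanks to the ENTRYPOINT, the resulting image can also be started standalone; a sketch reusing the flags run_sdk_container passes (image tag illustrative):

    docker run --privileged --network host \
        -e SDK_USER_ID="$(id -u)" -e SDK_GROUP_ID="$(id -g)" \
        -it ghcr.io/flatcar/flatcar-sdk-all:3027.0.0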


@ -0,0 +1,21 @@
ARG BASE
FROM ${BASE}
COPY --chown=sdk:sdk sdk_container/ /mnt/host/source
COPY --chown=sdk:sdk . /mnt/host/source/src/scripts
# Disable all sandboxing for SDK updates since some core packages
# (like Go) fail to build with a permission error otherwise.
RUN cp /home/sdk/.bashrc /home/sdk/.bashrc.bak
RUN echo 'export FEATURES="-sandbox -usersandbox -ipc-sandbox -network-sandbox -pid-sandbox"' \
>> /home/sdk/.bashrc
RUN chown sdk:sdk /mnt/host/source
RUN /home/sdk/sdk_entry.sh ./update_chroot --toolchain_boards="amd64-usr arm64-usr"
RUN /home/sdk/sdk_entry.sh ./setup_board --board="arm64-usr" --regen_configs
RUN /home/sdk/sdk_entry.sh ./setup_board --board="amd64-usr" --regen_configs
# Restore original .bashrc to remove sandbox disablement
RUN mv /home/sdk/.bashrc.bak /home/sdk/.bashrc
RUN chown sdk:sdk /home/sdk/.bashrc


@ -0,0 +1,291 @@
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This file contains common functions used across SDK container scripts.
#
# globals
#
sdk_container_common_versionfile="sdk_container/.repo/manifests/version.txt"
sdk_container_common_registry="ghcr.io/flatcar"
sdk_container_common_env_file="sdk_container/.sdkenv"
# Check for podman and docker; use docker if present, otherwise podman.
# Podman needs 'sudo' since we need privileged containers for the SDK.
is_podman=false
if command -v podman >/dev/null; then
# podman is present
if command -v docker >/dev/null ; then
# docker is present, too
if docker help | grep -q -i podman; then
# "docker" is actually podman.
# NOTE that 'docker --version' does not reliably work for podman detection
# since 'podman' uses argv[0] in its version string.
# A symlink docker->podman will result in 'podman' using the 'docker' argv[0].
is_podman=true
fi
else
# docker is not present
is_podman=true
fi
fi
docker="docker"
if "${is_podman}"; then
docker="sudo podman"
fi
# Common "echo" function
function yell() {
echo -e "\n###### $@ ######"
}
# --
# Guess the SDK version from the current git commit.
#
function get_git_version() {
local tag="$(git tag --points-at HEAD)"
if [ -z "$tag" ] ; then
git describe --tags
else
echo "$tag"
fi
}
# --
function get_sdk_version_from_versionfile() {
( source "$sdk_container_common_versionfile"; echo "$FLATCAR_SDK_VERSION"; )
}
# --
function get_version_from_versionfile() {
( source "$sdk_container_common_versionfile"; echo "$FLATCAR_VERSION"; )
}
# --
# return true if a given version number is an official build
#
function is_official() {
local vernum="$1"
local official="$(echo "$vernum" | sed -n 's/^[0-9]\+\.[0-9]\+\.[0-9]\+$/true/p')"
test -n "$official"
}
# --
# extract the build ID suffix from a version string ("alpha-3244.0.1-nightly2" => "nightly2")
#
function build_id_from_version() {
local version="$1"
# support vernums and versions ("alpha-"... is optional)
echo "${version}" | sed -n 's/^\([a-z]\+-\)\?[0-9.]\+[-+]\(.*\)$/\2/p'
}
# --
# Get channel from a version string ("alpha-3244.0.1-nightly2" => "alpha")
#
function channel_from_version() {
local version="$1"
local channel=""
channel=$(echo "${version}" | cut -d - -f 1)
if [ "${channel}" != "alpha" ] && [ "${channel}" != "beta" ] && [ "${channel}" != "stable" ] && [ "${channel}" != "lts" ]; then
channel="developer"
fi
echo "${channel}"
}
# --
function get_git_channel() {
channel_from_version "$(get_git_version)"
}
# --
# extract the version number (w/o build ID) from a version string ("alpha-3244.0.1-nightly2" => "3244.0.1")
#
function vernum_from_version() {
local version="$1"
# support vernums and versions ("alpha-"... is optional)
echo "${version}" | sed -n 's/^\([a-z]\+-\)\?\([0-9.]\+\).*/\2/p'
}
# --
# Strip prefix from version string if present ("alpha-3233.0.0[-...]" => "3233.0.0[-...]")
# and add a "+[build suffix]" if this is a non-official build. The "+" matches the version
# string generation in the build scripts.
function strip_version_prefix() {
local version="$1"
local build_id="$(build_id_from_version "${version}")"
local version_id="$(vernum_from_version "${version}")"
if [ -n "${build_id}" ] ; then
echo "${version_id}+${build_id}"
else
echo "${version_id}"
fi
}
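Worked examples, following the patterns from the comments above:

    strip_version_prefix "alpha-3244.0.1-nightly2"   # => 3244.0.1+nightly2
    strip_version_prefix "3233.0.0"                  # => 3233.0.0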
# --
# Derive docker-safe image version string from vernum.
#
function vernum_to_docker_image_version() {
local vernum="$1"
echo "$vernum" | sed 's/[+]/-/g'
}
# --
# Creates the Flatcar build / SDK version file.
# Must be called from the script root.
#
# In the versionfile, FLATCAR_VERSION is the OS image version _number_ plus a build ID if this is not an
# official build. FLATCAR_VERSION_ID is the plain vernum w/o build ID - it's the same as FLATCAR_VERSION
# for official builds. FLATCAR_BUILD_ID is the build ID suffix for non-official builds.
# Lastly, FLATCAR_SDK_VERSION is the full version number (including a build ID if it is not an official
# SDK release) of the SDK the OS image is to be built with.
#
function create_versionfile() {
local sdk_version="$1"
local os_version="${2:-$sdk_version}"
local build_id="$(build_id_from_version "${os_version}")"
local version_id="$(vernum_from_version "${os_version}")"
sdk_version="$(strip_version_prefix "${sdk_version}")"
os_version="$(strip_version_prefix "${os_version}")"
yell "Writing versionfile '$sdk_container_common_versionfile' to SDK '$sdk_version', OS '$os_version'."
cat >"$sdk_container_common_versionfile" <<EOF
FLATCAR_VERSION=${os_version}
FLATCAR_VERSION_ID=${version_id}
FLATCAR_BUILD_ID="${build_id}"
FLATCAR_SDK_VERSION=${sdk_version}
EOF
}
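A single-argument call reproduces a versionfile like the nightly one shown earlier in this commit:

    create_versionfile "3301.0.0+nightly-20220715-2100"
    # sdk_container/.repo/manifests/version.txt then contains:
    #   FLATCAR_VERSION=3301.0.0+nightly-20220715-2100
    #   FLATCAR_VERSION_ID=3301.0.0
    #   FLATCAR_BUILD_ID="nightly-20220715-2100"
    #   FLATCAR_SDK_VERSION=3301.0.0+nightly-20220715-2100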
# --
#
# Set up SDK environment variables.
# Environment vars are put in a file that is sourced by the container's
# .bashrc (if present). GNUPGHOME and SSH_AUTH_SOCK are set
# to container-specific paths if applicable.
function setup_sdk_env() {
local var
rm -f "$sdk_container_common_env_file"
# conditionally set up gnupg, ssh socket, and gcloud auth / boto
# depending on availability on the host
GNUPGHOME="${GNUPGHOME:-$HOME/.gnupg}"
if [ -d "${GNUPGHOME}" ] ; then
echo "GNUPGHOME=\"/home/sdk/.gnupg\"" >> "$sdk_container_common_env_file"
echo "export GNUPGHOME" >> "$sdk_container_common_env_file"
export GNUPGHOME
fi
if [ -e "${SSH_AUTH_SOCK:-}" ] ; then
local sockname="$(basename "${SSH_AUTH_SOCK}")"
echo "SSH_AUTH_SOCK=\"/run/sdk/ssh/$sockname\"" >> "$sdk_container_common_env_file"
echo "export SSH_AUTH_SOCK" >> "$sdk_container_common_env_file"
fi
# keep in sync with 90_env_keep
for var in FLATCAR_BUILD_ID COREOS_OFFICIAL \
EMAIL GIT_AUTHOR_EMAIL GIT_AUTHOR_NAME \
GIT_COMMITTER_EMAIL GIT_COMMITTER_NAME \
GIT_PROXY_COMMAND GIT_SSH RSYNC_PROXY \
GPG_AGENT_INFO FORCE_STAGES \
SIGNER \
all_proxy ftp_proxy http_proxy https_proxy no_proxy; do
if [ -n "${!var:-}" ] ; then
echo "${var}=\"${!var}\"" >> "$sdk_container_common_env_file"
echo "export ${var}" >> "$sdk_container_common_env_file"
fi
done
}
# --
# Set up gcloud legacy creds (via GOOGLE_APPLICATION_CREDENTIALS)
# for the SDK container.
# This will also create a boto config right next to the
# GOOGLE_APPLICATION_CREDENTIALS json file.
function setup_gsutil() {
local creds="${GOOGLE_APPLICATION_CREDENTIALS:-$HOME/.config/gcloud/application_default_credentials.json}"
if [ ! -e "$creds" ]; then
return
fi
local creds_dir="$(dirname "$creds")"
local botofile="$creds_dir/boto-flatcar-sdk"
# TODO t-lo: move generation of boto file to sdk_entry so
# it's only created inside the container.
# read creds file and create boto file for gsutil
local tmp="$(mktemp)"
trap "rm -f '$tmp'" EXIT
local oauth_refresh="$(jq -r '.refresh_token' "$creds")"
local client_id="$(jq -r '.client_id' "$creds")"
local client_secret="$(jq -r '.client_secret' "$creds")"
cat >>"$tmp" <<EOF
[Credentials]
gs_oauth2_refresh_token = $oauth_refresh
[OAuth2]
client_id = $client_id
client_secret = $client_secret
EOF
mv "$tmp" "$botofile"
echo "BOTO_PATH=\"$botofile\"" >> "$sdk_container_common_env_file"
echo "export BOTO_PATH" >> "$sdk_container_common_env_file"
echo "GOOGLE_APPLICATION_CREDENTIALS=\"$creds\"" >> "$sdk_container_common_env_file"
echo "export GOOGLE_APPLICATION_CREDENTIALS" >> "$sdk_container_common_env_file"
BOTO_PATH="$botofile"
GOOGLE_APPLICATION_CREDENTIALS="$creds"
export BOTO_PATH
export GOOGLE_APPLICATION_CREDENTIALS
}
# --
# Generate volume mount command line options for docker
# to pass gpg, ssh, and gcloud auth host directories
# into the SDK container.
function gnupg_ssh_gcloud_mount_opts() {
local sdk_gnupg_home="/home/sdk/.gnupg"
local gpgagent_dir="/run/user/$(id -u)/gnupg"
# pass host GPG home and Agent directories to container
if [ -d "$GNUPGHOME" ] ; then
echo "-v $GNUPGHOME:$sdk_gnupg_home"
fi
if [ -d "$gpgagent_dir" ] ; then
echo "-v $gpgagent_dir:$gpgagent_dir"
fi
if [ -e "${SSH_AUTH_SOCK:-}" ] ; then
local sshsockdir="$(dirname "$SSH_AUTH_SOCK")"
echo "-v $sshsockdir:/run/sdk/ssh"
fi
if [ -e "${GOOGLE_APPLICATION_CREDENTIALS:-}" ] ; then
local creds_dir="$(dirname "${GOOGLE_APPLICATION_CREDENTIALS}")"
if [ -d "$creds_dir" ] ; then
echo "-v $creds_dir:$creds_dir"
fi
fi
}
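On a typical host the emitted options look along these lines (the uid and socket paths are illustrative):

    -v /home/user/.gnupg:/home/sdk/.gnupg
    -v /run/user/1000/gnupg:/run/user/1000/gnupg
    -v /run/user/1000:/run/sdk/ssh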

73
sdk_lib/sdk_entry.sh Executable file

@ -0,0 +1,73 @@
#!/bin/bash
if [ -n "${SDK_USER_ID:-}" ] ; then
# If the "core" user from /usr/share/baselayout/passwd has the same ID, allow to take it instead
usermod --non-unique -u $SDK_USER_ID sdk
fi
if [ -n "${SDK_GROUP_ID:-}" ] ; then
groupmod --non-unique -g $SDK_GROUP_ID sdk
fi
chown -R sdk:sdk /home/sdk
# Check if the OS image version we're working on is newer than
# the SDK container version and, if it is, update the board
# chroots' portage configuration to point to the correct binhost.
(
source /etc/lsb-release # SDK version in DISTRIB_RELEASE
source /mnt/host/source/.repo/manifests/version.txt # OS image version in FLATCAR_VERSION_ID
version="${FLATCAR_VERSION_ID}"
# If this is a nightly build tag we can use pre-built binaries directly from the
# build cache.
if [[ "${FLATCAR_BUILD_ID}" =~ ^nightly-.*$ ]] ; then
version="${FLATCAR_VERSION_ID}+${FLATCAR_BUILD_ID}"
fi
if [ "${version}" != "${DISTRIB_RELEASE}" ] ; then
for target in amd64-usr arm64-usr; do
if [ ! -d "/build/$target" ] ; then
continue
fi
if [ -f "/build/$target/etc/target-version.txt" ] ; then
source "/build/$target/etc/target-version.txt"
if [ "${TARGET_FLATCAR_VERSION}" = "${version}" ] ; then
continue # already updated
fi
fi
echo
echo "Updating board support in '/build/${target}' to use package cache for version '${version}'"
echo "---"
sudo su sdk -l -c "/home/sdk/trunk/src/scripts/setup_board --board='$target' --regen_configs_only"
echo "TARGET_FLATCAR_VERSION='${version}'" | sudo tee "/build/$target/etc/target-version.txt" >/dev/null
done
fi
)
# This is ugly.
# We need to run commands through 'su' with a login shell so the
# SDK user gets a fresh login: 'sdk' is a member of multiple groups,
# and plain docker USER only allows specifying membership of a single group.
# When a command is passed to the container, we run
#  sudo su sdk -c "<command>".
# We also need to preserve whitespace in the arguments of commands
# passed to the container, e.g.
#  ./update_chroot --toolchain_boards="amd64-usr arm64-usr".
# This is done via a separate ".cmd" file since we have used up
# our quotes for su -c "<cmd>" already.
if [ $# -gt 0 ] ; then
cmd="/home/sdk/.cmd"
echo -n "exec bash -l -i -c '" >"$cmd"
for arg in "$@"; do
echo -n "\"$arg\" " >>"$cmd"
done
echo "'" >>"$cmd"
chmod 755 "$cmd"
sudo su sdk -c "$cmd"
rc=$?
rm -f "$cmd"
exit $rc
else
exec sudo su -l sdk
fi
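For the update_chroot invocation used in the Dockerfiles above, the generated /home/sdk/.cmd comes out as (the trailing space before the closing quote is harmless):

    exec bash -l -i -c '"./update_chroot" "--toolchain_boards=amd64-usr arm64-usr" '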


@ -0,0 +1,36 @@
#!/bin/bash
echo "This script will initialise your Flatcar SDK container as a self-contained SDK."
echo "Please note that the preferred way of using the Flatcar SDK container is by cloning"
echo " https://github.com/flatcar/scripts"
echo "and using the ./run_sdk_container script."
echo
echo "Press [RETURN] to continue, CTRL+C to abort"
echo
read junk
unset junk
# --
function clone_version() {
local repo="$1"
local dest="$2"
local version="$3"
git clone https://github.com/flatcar/$repo "$dest"
git -C "${dest}" fetch --all
local tag=$(git -C "${dest}" tag -l | grep "${version}")
git -C "${dest}" checkout "$tag"
}
# --
version="$(source /mnt/host/source/.repo/manifests/version.txt; echo $FLATCAR_VERSION)"
mkdir -p /home/sdk/trunk/src/third_party/
clone_version scripts /home/sdk/trunk/src/scripts "$version"
clone_version portage-stable /home/sdk/trunk/src/third_party/portage-stable "$version"
clone_version coreos-overlay /home/sdk/trunk/src/third_party/coreos-overlay "$version"
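Inside a standalone container the script sits at the path it was copied to by Dockerfile.lean-arch above:

    /mnt/host/source/src/sdk_init_selfcontained.sh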

8
settings.env Normal file

@ -0,0 +1,8 @@
# Flatcar SDK settings
# Binary package caches, for releases and for development (nightlies etc.)
SETTING_BINPKG_SERVER_PROD="https://mirror.release.flatcar-linux.net"
# development servers / bin caches.
SETTING_BINPKG_SERVER_DEV="https://bucket.release.flatcar-linux.net/flatcar-jenkins"
SETTING_BINPKG_SERVER_DEV_CONTAINERISED="https://bincache.flatcar-linux.net"

100
update_sdk_container_image Executable file

@ -0,0 +1,100 @@
#!/bin/bash
#
# Copyright (c) 2021 The Flatcar Maintainers.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script will update an SDK container image and create a new minor version.
set -eu
cd $(dirname "$0")
source sdk_lib/sdk_container_common.sh
os_version="$(get_version_from_versionfile)"
base_sdk_version="$(get_sdk_version_from_versionfile)"
new_sdk_version=""
keep="false"
cleanup=""
usage() {
echo " $0 - Update SDK container image."
echo " Create a new container image based on the current SDK ($base_sdk_version)"
echo " with current changes from coreos-overlay and portage-stable."
echo
echo " Just like build_sdk_container_image the resulting container comes in 3 flavours:"
echo " 1. flatcar-sdk-all - includes both ARM64 and AMD64 support"
echo " 2.+3. flatcar-sdk-(amd64|arm64) - only includes support for one target."
echo " Usage:"
echo " $0 [-k] [-x <script>] <new-sdk-version>"
echo
echo " <new-sdk-version> is the new SDK version to be built."
echo " -k - Keep intermediate container image."
echo " -x <script> - For each resource generated during build (container etc.)"
echo " add a cleanup line to <script> which, when run, will free"
echo " the resource. Useful for CI."
echo
}
# --
while [ 0 -lt $# ] ; do
case "$1" in
-h) usage; exit 0;;
-k) keep="true"; shift;;
-x) cleanup="$2"; shift; shift;;
*) if [ -z "$new_sdk_version" ] ; then
new_sdk_version="$1"; shift
else
echo "ERROR: spurious positional parameter '$@'."
usage
exit 1
fi;;
esac
done
if [ -z "$new_sdk_version" ] ; then
echo
echo "ERROR: missing target SDK version."
echo
usage
exit 1
fi
# --
docker_vernum="$(vernum_to_docker_image_version "${new_sdk_version}")"
sdk_build_image="flatcar-sdk-build:${docker_vernum}"
if [ -n "$cleanup" ] ; then
echo "$docker image rm -f '${sdk_build_image}'" >> "$cleanup"
fi
yell "Creating new SDK container image ${new_sdk_version} from ${base_sdk_version}"
create_versionfile "${new_sdk_version}" "${os_version}"
$docker build -t "${sdk_build_image}" \
--build-arg BASE="$sdk_container_common_registry/flatcar-sdk-all:${base_sdk_version}" \
-f sdk_lib/Dockerfile.sdk-update \
.
for a in all arm64 amd64; do
yell "Creating '$a' arch SDK image"
rmarch=""; rmcross=""
case $a in
arm64) rmarch="amd64-usr"; rmcross="x86_64-cros-linux-gnu";;
amd64) rmarch="arm64-usr"; rmcross="aarch64-cros-linux-gnu";;
esac
$docker build -t "$sdk_container_common_registry/flatcar-sdk-${a}:${docker_vernum}" \
--build-arg VERSION="${docker_vernum}" \
--build-arg RMARCH="${rmarch}" \
--build-arg RMCROSS="${rmcross}" \
-f sdk_lib/Dockerfile.lean-arch \
.
done
if ! $keep; then
yell "Cleaning up intermediate container image"
$docker rmi flatcar-sdk-build:"${docker_vernum}"
fi
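A typical CI invocation might look like this (version number and cleanup script name illustrative):

    ./update_sdk_container_image -x ci_cleanup.sh 3027.0.1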