Merge pull request #2477 from flatcar/chewi/drop-old-code
Drop lots of obsolete CI, release signing, and upload code
commit 4ca21a1907

@@ -60,7 +60,6 @@ GENTOO_MIRRORS="${GENTOO_MIRRORS//https:\/\//http://}"
export GENTOO_MIRRORS

catalyst_init "$@"
check_gsutil_opts
ROOT_OVERLAY=${TEMPDIR}/stage4_overlay

if [[ "$STAGES" =~ stage4 ]]; then
@@ -108,18 +107,6 @@ if [[ "$STAGES" =~ stage4 ]]; then
verify_digests "${release_image}" "${release_contents}"

info "SDK ready: ${release_image}"

def_upload_path="${UPLOAD_ROOT}/sdk/${ARCH}/${FLAGS_version}"
sign_and_upload_files "tarball" "${def_upload_path}" "" \
  "${release_image}" "${release_contents}" "${release_digests}"
sign_and_upload_files "packages" "${def_upload_path}" "pkgs/" \
  "${BINPKGS}"/*

if [ -d "${BINPKGS}/crossdev" ]; then
  # Upload the SDK toolchain packages
  sign_and_upload_files "cross toolchain packages" "${def_upload_path}" \
    "toolchain/" "${BINPKGS}/crossdev"/*
fi
fi

command_completed
@@ -87,8 +87,6 @@ eval set -- "${FLAGS_ARGV:-prod}"
# so will die prematurely if 'switch_to_strict_mode' is specified before now.
switch_to_strict_mode

check_gsutil_opts

# If downloading packages is enabled ensure the board is configured properly.
if [[ ${FLAGS_getbinpkg} -eq ${FLAGS_TRUE} ]]; then
  "${SRC_ROOT}/scripts/setup_board" --board="${FLAGS_board}" \
@@ -202,8 +200,6 @@ FLATCAR_BUILD_ID="${FLATCAR_BUILD_ID}"
FLATCAR_SDK_VERSION=${FLATCAR_SDK_VERSION}
EOF

upload_image "${BUILD_DIR}/version.txt"

# Create a named symlink.
set_build_symlinks latest "${FLAGS_group}-latest"

@@ -230,5 +226,3 @@ if [[ "${PROD_IMAGE}" -eq 1 ]]; then
fi

command_completed
@@ -62,22 +62,13 @@ extract_update() {
  local image_name="$1"
  local disk_layout="$2"
  local update_path="${BUILD_DIR}/${image_name%_image.bin}_update.bin"
  local digest_path="${update_path}.DIGESTS"

  "${BUILD_LIBRARY_DIR}/disk_util" --disk_layout="${disk_layout}" \
      extract "${BUILD_DIR}/${image_name}" "USR-A" "${update_path}"

  # Compress image
  files_to_evaluate+=( "${update_path}" )
  declare -a compressed_images
  declare -a extra_files
  compress_disk_images files_to_evaluate compressed_images extra_files

  # Upload compressed image
  upload_image -d "${digest_path}" "${compressed_images[@]}" "${extra_files[@]}"

  # Upload legacy digests
  upload_legacy_digests "${digest_path}" compressed_images
  compress_disk_images files_to_evaluate

  # For production as well as dev builds we generate a dev-key-signed update
  # payload for running tests (the signature won't be accepted by production systems).
@@ -87,8 +78,6 @@ extract_update() {
      -new_image "${update_path}" \
      -new_kernel "${BUILD_DIR}/${image_name%.bin}.vmlinuz" \
      -out_file "${update_test}"

  upload_image "${update_test}"
}

zip_update_tools() {
@@ -100,8 +89,6 @@ zip_update_tools() {
  export REPO_MANIFESTS_DIR SCRIPTS_DIR
  "${BUILD_LIBRARY_DIR}/generate_au_zip.py" \
    --arch "$(get_sdk_arch)" --output-dir "${BUILD_DIR}" --zip-name "${update_zip}"

  upload_image "${BUILD_DIR}/${update_zip}"
}

generate_update() {
@@ -123,16 +110,8 @@ generate_update() {

  # Compress image
  declare -a files_to_evaluate
  declare -a compressed_images
  declare -a extra_files
  files_to_evaluate+=( "${update}.bin" )
  compress_disk_images files_to_evaluate compressed_images extra_files

  # Upload images
  upload_image -d "${update}.DIGESTS" "${update}".{gz,zip} "${compressed_images[@]}" "${extra_files[@]}"

  # Upload legacy digests
  upload_legacy_digests "${update}.DIGESTS" compressed_images
  compress_disk_images files_to_evaluate
}

# ldconfig cannot generate caches for non-native arches.
@@ -115,20 +115,6 @@ create_dev_container() {
  finish_image "${image_name}" "${disk_layout}" "${root_fs_dir}" "${image_contents}" "${image_contents_wtd}"

  declare -a files_to_evaluate
  declare -a compressed_images
  declare -a extra_files

  files_to_evaluate+=( "${BUILD_DIR}/${image_name}" )
  compress_disk_images files_to_evaluate compressed_images extra_files

  upload_image -d "${BUILD_DIR}/${image_name}.DIGESTS" \
      "${BUILD_DIR}/${image_contents}" \
      "${BUILD_DIR}/${image_contents_wtd}" \
      "${BUILD_DIR}/${image_packages}" \
      "${BUILD_DIR}/${image_licenses}" \
      "${compressed_images[@]}" \
      "${extra_files[@]}"

  # Upload legacy digests
  upload_legacy_digests "${BUILD_DIR}/${image_name}.DIGESTS" compressed_images
  compress_disk_images files_to_evaluate
}
@@ -22,8 +22,6 @@ SCRIPTS_DIR = os.environ['SCRIPTS_DIR']
# GLOBALS
STATIC_FILES = ['%s/version.txt' % REPO_MANIFESTS_DIR,
                '%s/common.sh' % SCRIPTS_DIR,
                '%s/core_pre_alpha' % SCRIPTS_DIR,
                '%s/core_roller_upload' % SCRIPTS_DIR,
                '%s/core_sign_update' % SCRIPTS_DIR,
                ]
@@ -85,24 +85,8 @@ finish_modify_image() {

  declare -a files_to_evaluate
  declare -a compressed_images
  declare -a extra_files

  files_to_evaluate+=( "${DST_IMAGE}" )
  compress_disk_images files_to_evaluate compressed_images extra_files

  upload_image -d "${DST_IMAGE}.DIGESTS" \
      "${compressed_images[@]}" \
      "${extra_files[@]}"

  # Upload legacy digests
  upload_legacy_digests "${DST_IMAGE}.DIGESTS" compressed_images

  for filename in "${EXTRA_FILES[@]}"; do
    if [[ -e "${BUILD_DIR}/${filename}" ]]; then
      upload_image "${BUILD_DIR}/${filename}"
    fi
  done
  compress_disk_images files_to_evaluate

  set_build_symlinks "${FLAGS_group}-latest"
@@ -131,7 +131,7 @@ create_prod_image() {
  sudo rsync -a --delete "${BUILD_DIR}/configroot/etc/portage" "${BUILD_DIR}/root_fs_dir2/etc"
  sudo mksquashfs "${BUILD_DIR}/root_fs_dir2" "${BUILD_DIR}/${image_sysext_base}" -noappend -xattrs-exclude '^btrfs.'
  sudo rm -rf "${BUILD_DIR}/root_fs_dir2"

  # clean-ups of things we do not need
  sudo rm ${root_fs_dir}/etc/csh.env
  sudo rm -rf ${root_fs_dir}/etc/env.d
@@ -180,56 +180,8 @@ EOF
      "${image_initrd_contents_wtd}" \
      "${image_disk_usage}"

  # Upload
  local to_upload=(
    "${BUILD_DIR}/${image_contents}"
    "${BUILD_DIR}/${image_contents_wtd}"
    "${BUILD_DIR}/${image_packages}"
    "${BUILD_DIR}/${image_sbom}"
    "${BUILD_DIR}/${image_licenses}"
    "${BUILD_DIR}/${image_kernel}"
    "${BUILD_DIR}/${image_pcr_policy}"
    "${BUILD_DIR}/${image_grub}"
    "${BUILD_DIR}/${image_kconfig}"
    "${BUILD_DIR}/${image_initrd_contents}"
    "${BUILD_DIR}/${image_initrd_contents_wtd}"
    "${BUILD_DIR}/${image_disk_usage}"
    "${BUILD_DIR}/${image_sysext_base}"
  )

  # append sysext inventories to uploads
  if [[ -n "${base_sysexts}" ]] ; then
    local inventory_file="" image_basename="${image_name%.bin}"

    for inventory_file in "${image_contents}" "${image_contents_wtd}" "${image_disk_usage}" "${image_packages}" ; do
      local suffix="${inventory_file/${image_basename}/}" sysext=""

      for sysext in ${base_sysexts//,/ }; do
        local name="${sysext%:*}"
        local sysext_inventory="${root_fs_sysexts_output_dir}/${name}${suffix}"
        if [[ ! -f "${sysext_inventory}" ]] ; then
          die "Sysext inventory file '${name}${suffix}' for '${inventory_file}' not found in '${root_fs_sysexts_output_dir}'"
        fi
        to_upload+=( "${sysext_inventory}" )
      done
    done
  fi

  local files_to_evaluate=( "${BUILD_DIR}/${image_name}" )
  declare -a compressed_images
  declare -a extra_files
  compress_disk_images files_to_evaluate compressed_images extra_files
  to_upload+=( "${compressed_images[@]}" )
  to_upload+=( "${extra_files[@]}" )

  # FIXME(bgilbert): no shim on arm64
  if [[ -f "${BUILD_DIR}/${image_shim}" ]]; then
    to_upload+=("${BUILD_DIR}/${image_shim}")
  fi
  upload_image -d "${BUILD_DIR}/${image_name}.DIGESTS" "${to_upload[@]}"

  # Upload legacy digests
  upload_legacy_digests "${BUILD_DIR}/${image_name}.DIGESTS" compressed_images
  compress_disk_images files_to_evaluate
}

create_prod_tar() {
@@ -246,13 +198,11 @@ create_prod_tar() {
  sudo umount "/mnt/${lodevbase}p9"
  sudo rmdir "/mnt/${lodevbase}p9"
  sudo losetup --detach "${lodev}"
  upload_image "${container}"
}

create_prod_sysexts() {
  local image_name="$1"
  local image_sysext_base="${image_name%.bin}_sysext.squashfs"
  local to_upload=()
  for sysext in "${EXTRA_SYSEXTS[@]}"; do
    local name="flatcar-${sysext%:*}"
    local pkgs="${sysext#*:}"
@@ -273,15 +223,5 @@ create_prod_sysexts() {
      -private_key "/usr/share/update_engine/update-payload-key.key.pem" \
      -new_image "${BUILD_DIR}/${name}.raw" \
      -out_file "${BUILD_DIR}/flatcar_test_update-${name}.gz"
    to_upload+=(
      "${BUILD_DIR}/${name}.raw"
      "${BUILD_DIR}/${name}_contents.txt"
      "${BUILD_DIR}/${name}_contents_wtd.txt"
      "${BUILD_DIR}/${name}_disk_usage.txt"
      "${BUILD_DIR}/${name}_packages.txt"
      "${BUILD_DIR}/flatcar_test_update-${name}.gz"
    )
  done
  upload_image -d ${BUILD_DIR}/sysexts.DIGESTS "${to_upload[@]}"
}
@@ -2,36 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

GSUTIL_OPTS=
UPLOAD_ROOT=
UPLOAD_PATH=
UPLOAD_DEFAULT=${FLAGS_FALSE}
DEFAULT_IMAGE_COMPRESSION_FORMAT="bz2"

# Default upload root can be overridden from the environment.
_user="${USER}"
[[ ${USER} == "root" ]] && _user="${SUDO_USER}"
: ${FLATCAR_UPLOAD_ROOT:=gs://users.developer.core-os.net/${_user}}
unset _user

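The ": ${VAR:=default}" line above is a stock shell idiom: the ":" builtin ignores its arguments, so the only effect of the expansion is to assign the default when the variable is unset or empty. A minimal sketch with a hypothetical bucket name:

unset FLATCAR_UPLOAD_ROOT
: ${FLATCAR_UPLOAD_ROOT:=gs://example-bucket/someuser}  # assigns: variable was unset
echo "${FLATCAR_UPLOAD_ROOT}"                           # -> gs://example-bucket/someuser
FLATCAR_UPLOAD_ROOT=gs://other-bucket
: ${FLATCAR_UPLOAD_ROOT:=gs://example-bucket/someuser}  # no-op: already set
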
DEFINE_boolean parallel ${FLAGS_TRUE} \
  "Enable parallelism in gsutil."
DEFINE_boolean upload ${UPLOAD_DEFAULT} \
  "Upload all packages/images via gsutil."
DEFINE_boolean private ${FLAGS_TRUE} \
  "Upload the image as a private object."
DEFINE_string upload_root "${FLATCAR_UPLOAD_ROOT}" \
  "Upload prefix, board/version/etc will be appended. Must be a gs:// URL."
DEFINE_string upload_path "" \
  "Full upload path, overrides --upload_root. Must be a full gs:// URL."
DEFINE_string download_root "" \
  "HTTP download prefix, board/version/etc will be appended."
DEFINE_string download_path "" \
  "HTTP download path, overrides --download_root."
DEFINE_string sign "" \
  "Sign all files to be uploaded with the given GPG key."
DEFINE_string sign_digests "" \
  "Sign image DIGESTS files with the given GPG key."
DEFINE_string image_compression_formats "${DEFAULT_IMAGE_COMPRESSION_FORMAT}" \
  "Compress the resulting images using these formats. This option accepts a list of comma-separated values. Options are: none, bz2, gz, zip, zst"
DEFINE_boolean only_store_compressed ${FLAGS_TRUE} \
@@ -89,14 +61,6 @@ compress_disk_images() {
  # among them.
  local -n local_files_to_evaluate="$1"

  # An array that will hold the path on disk to the resulting disk image archives.
  # Multiple compression formats may be requested, so this array may hold
  # multiple archives for the same image.
  local -n local_resulting_archives="$2"

  # Files that did not match the filter for disk images.
  local -n local_extra_files="$3"

  info "Compressing ${#local_files_to_evaluate[@]} images"
  # We want to compress images, but we also want to remove the uncompressed files
  # from the list of uploadable files.
@@ -118,7 +82,6 @@ compress_disk_images() {
        if [ -z "${processed_format[${format}]}" ]; then
          info "Compressing ${filename##*/} to ${format}"
          COMPRESSED_FILENAME=$(compress_file "${filename}" "${format}")
          local_resulting_archives+=( "$COMPRESSED_FILENAME" )
          processed_format["${format}"]=1
        fi
      done
@@ -133,244 +96,6 @@ compress_disk_images() {
      else
        info "Keeping ${filename}"
      fi
    else
      local_extra_files+=( "${filename}" )
    fi
  done
}

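Worth noting for readers of the call sites elsewhere in this diff: compress_disk_images fills its second and third arguments through bash namerefs ("local -n", bash 4.3+), so callers pass bare array names rather than expansions. A minimal standalone sketch of the pattern, with hypothetical names:

fill_outputs() {
  local -n _inputs="$1"    # nameref: read the caller's input array
  local -n _results="$2"   # nameref: append into the caller's output array
  local f
  for f in "${_inputs[@]}"; do
    _results+=( "${f}.bz2" )
  done
}

declare -a inputs=( "disk_a.img" "disk_b.img" )
declare -a results=()
fill_outputs inputs results   # pass names, not "${inputs[@]}"
echo "${results[@]}"          # -> disk_a.img.bz2 disk_b.img.bz2
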
upload_legacy_digests() {
  [[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0

  local local_digest_file="$1"
  local -n local_compressed_files="$2"

  [[ "${#local_compressed_files[@]}" -gt 0 ]] || return 0

  # Upload legacy digests
  declare -a digests_to_upload
  for file in "${local_compressed_files[@]}";do
    legacy_digest_file="${file}.DIGESTS"
    cp "${local_digest_file}" "${legacy_digest_file}"
    digests_to_upload+=( "${legacy_digest_file}" )
  done
  local def_upload_path="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
  upload_files "digests" "${def_upload_path}" "" "${digests_to_upload[@]}"
}

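In effect the function fans a single digest file out into one copy per archive. A sketch with hypothetical file names:

# Given digest file image.DIGESTS and compressed archives
#   image.bz2  image.gz
# the loop uploads two identical copies named
#   image.bz2.DIGESTS  image.gz.DIGESTS
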
check_gsutil_opts() {
  [[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0

  if [[ ${FLAGS_parallel} -eq ${FLAGS_TRUE} ]]; then
    GSUTIL_OPTS="-m"
  fi

  if [[ -n "${FLAGS_upload_root}" ]]; then
    if [[ "${FLAGS_upload_root}" != gs://* ]] \
        && [[ "${FLAGS_upload_root}" != rsync://* ]] ; then
      die_notrace "--upload_root must be a gs:// or rsync:// URL"
    fi
    # Make sure the path doesn't end with a slash
    UPLOAD_ROOT="${FLAGS_upload_root%%/}"
  fi

  if [[ -n "${FLAGS_upload_path}" ]]; then
    if [[ "${FLAGS_upload_path}" != gs://* ]] \
        && [[ "${FLAGS_upload_path}" != rsync://* ]] ; then
      die_notrace "--upload_path must be a gs:// or rsync:// URL"
    fi
    # Make sure the path doesn't end with a slash
    UPLOAD_PATH="${FLAGS_upload_path%%/}"
  fi

  # Ensure scripts run via sudo can use the user's gsutil/boto configuration.
  if [[ -n "${SUDO_USER}" ]]; then
    : ${BOTO_PATH:="$HOME/.boto:/home/$SUDO_USER/.boto"}
    export BOTO_PATH
  fi
}

# Generic upload function
# Usage: upload_files "file type" "${UPLOAD_ROOT}/default/path" "" files...
# arg1: file type reported via log
# arg2: default upload path, overridden by --upload_path
# arg3: upload path suffix that can't be overridden, must end in /
# argv: remaining args are files or directories to upload
upload_files() {
  [[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0

  local msg="$1"
  local local_upload_path="$2"
  local extra_upload_suffix="$3"
  shift 3

  if [[ -n "${UPLOAD_PATH}" ]]; then
    local_upload_path="${UPLOAD_PATH}"
  fi

  if [[ -n "${extra_upload_suffix}" && "${extra_upload_suffix}" != */ ]]
  then
    die "upload suffix '${extra_upload_suffix}' doesn't end in /"
  fi

  info "Uploading ${msg} to ${local_upload_path}"

  if [[ "${local_upload_path}" = 'rsync://'* ]]; then
    local rsync_upload_path="${local_upload_path#rsync://}"
    local sshcmd="ssh -o BatchMode=yes "
    sshcmd="$sshcmd -o StrictHostKeyChecking=no"
    sshcmd="$sshcmd -o UserKnownHostsFile=/dev/null"
    sshcmd="$sshcmd -o NumberOfPasswordPrompts=0"

    # ensure the target path exists
    local sshuserhost="${rsync_upload_path%:*}"
    local destpath="${rsync_upload_path#*:}"
    ${sshcmd} "${sshuserhost}" \
      "mkdir -p ${destpath}/${extra_upload_suffix}"

    # now sync
    rsync -Pav -e "${sshcmd}" "$@" \
      "${rsync_upload_path}/${extra_upload_suffix}"
  else
    gsutil ${GSUTIL_OPTS} cp -R "$@" \
      "${local_upload_path}/${extra_upload_suffix}"
  fi
}

# Identical to upload_files but GPG signs every file if enabled.
# Usage: sign_and_upload_files "file type" "${UPLOAD_ROOT}/default/path" "" files...
# arg1: file type reported via log
# arg2: default upload path, overridden by --upload_path
# arg3: upload path suffix that can't be overridden, must end in /
# argv: remaining args are files or directories to upload
sign_and_upload_files() {
  [[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0

  local msg="$1"
  local path="$2"
  local suffix="$3"
  shift 3

  # run a subshell to possibly clean the temporary directory with
  # signatures without clobbering the global EXIT trap
  (
    # Create simple GPG detached signature for all uploads.
    local sigs=()
    if [[ -n "${FLAGS_sign}" ]]; then
      local file
      local sigfile
      local sigdir=$(mktemp --directory)
      trap "rm -rf ${sigdir}" EXIT
      for file in "$@"; do
        if [[ "${file}" =~ \.(asc|gpg|sig)$ ]]; then
          continue
        fi

        for sigfile in $(find "${file}" ! -type d); do
          mkdir -p "${sigdir}/${sigfile%/*}"
          gpg --batch --local-user "${FLAGS_sign}" \
            --output "${sigdir}/${sigfile}.sig" \
            --detach-sign "${sigfile}" || die "gpg failed"
        done

        [ -d "${file}" ] &&
          sigs+=( "${sigdir}/${file}" ) ||
          sigs+=( "${sigdir}/${file}.sig" )
      done
    fi

    upload_files "${msg}" "${path}" "${suffix}" "$@" "${sigs[@]}"
  )
}

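The signing loop produces one detached .sig per regular file, mirrored under a temporary directory so that directory uploads keep their layout. Per file it amounts to roughly the following, with a hypothetical key and artifact name:

gpg --batch --local-user "buildbot@example.com" \
  --output "/tmp/sigs/flatcar_production_image.bin.bz2.sig" \
  --detach-sign "flatcar_production_image.bin.bz2"
# Consumers can then check the artifact against its signature:
gpg --verify flatcar_production_image.bin.bz2.sig flatcar_production_image.bin.bz2
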
upload_packages() {
  [[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0
  [[ -n "${BOARD}" ]] || die "board_options.sh must be sourced first"

  local board_packages="${1:-"${BOARD_ROOT}/packages"}"
  local def_upload_path="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
  sign_and_upload_files packages ${def_upload_path} "pkgs/" \
    "${board_packages}"/*
}

# Upload a set of files (usually images) and digest, optionally w/ gpg sig
# If more than one file is specified -d must be the first argument
# Usage: upload_image [-d file.DIGESTS] file1 [file2...]
upload_image() {
  [[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]] || return 0
  [[ -n "${BOARD}" ]] || die "board_options.sh must be sourced first"

  # The name to use for .DIGESTS and .DIGESTS.asc must be explicit if
  # there is more than one file to upload to avoid potential confusion.
  local digests
  if [[ "$1" == "-d" ]]; then
    [[ -n "$2" ]] || die "-d requires an argument"
    digests="$2"
    shift 2
  else
    [[ $# -eq 1 ]] || die "-d is required for multi-file uploads"
    # digests is assigned after image is possibly compressed/renamed
  fi

  local uploads=()
  local filename
  for filename in "$@"; do
    if [[ ! -f "${filename}" ]]; then
      die "File '${filename}' does not exist!"
    fi
    uploads+=( "${filename}" )
  done

  if [[ -z "${digests}" ]]; then
    digests="${uploads[0]}.DIGESTS"
  fi

  # For consistency generate a .DIGESTS file similar to the one catalyst
  # produces for the SDK tarballs and upload it too.
  make_digests -d "${digests}" "${uploads[@]}"
  uploads+=( "${digests}" )

  # Create signature as ...DIGESTS.asc as Gentoo does.
  if [[ -n "${FLAGS_sign_digests}" ]]; then
    rm -f "${digests}.asc"
    gpg --batch --local-user "${FLAGS_sign_digests}" \
      --clearsign "${digests}" || die "gpg failed"
    uploads+=( "${digests}.asc" )
  fi

  local log_msg=$(basename "$digests" .DIGESTS)
  local def_upload_path="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
  sign_and_upload_files "${log_msg}" "${def_upload_path}" "" "${uploads[@]}"
}

# Translate the configured upload URL to a download URL
# Usage: download_image_url "path/suffix"
download_image_url() {
  if [[ ${FLAGS_upload} -ne ${FLAGS_TRUE} ]]; then
    echo "$1"
    return 0
  fi

  local download_root="${FLAGS_download_root:-${UPLOAD_ROOT}}"

  local download_path
  local download_channel
  if [[ -n "${FLAGS_download_path}" ]]; then
    download_path="${FLAGS_download_path%%/}"
  elif [[ "${download_root}" == *flatcar-jenkins* ]]; then
    download_channel="${download_root##*/}"
    download_root="gs://${download_channel}.release.flatcar-linux.net"
    # Official release download paths don't include the boards directory
    download_path="${download_root%%/}/${BOARD}/${FLATCAR_VERSION}"
  else
    download_path="${download_root%%/}/boards/${BOARD}/${FLATCAR_VERSION}"
  fi

  # Just in case download_root was set from UPLOAD_ROOT
  if [[ "${download_path}" == gs://* ]]; then
    download_path="https://${download_path#gs://}"
  fi

  echo "${download_path}/$1"
}

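Tracing the branches with plausible (not source-given) values shows the two URL shapes:

# download_root=gs://flatcar-jenkins/alpha matches *flatcar-jenkins*, so
#   download_image_url "flatcar_production_image.bin.bz2"
#   -> https://alpha.release.flatcar-linux.net/${BOARD}/${FLATCAR_VERSION}/flatcar_production_image.bin.bz2
# whereas a plain gs:// root keeps the boards/ directory:
#   download_root=gs://example-bucket
#   -> https://example-bucket/boards/${BOARD}/${FLATCAR_VERSION}/flatcar_production_image.bin.bz2
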
@@ -1224,7 +1224,7 @@ EOF
  "version": "${FLATCAR_VERSION_ID}",
  "providers": [{
    "name": "${provider}",
    "url": "$(download_image_url "$(_dst_name ".box")")",
    "url": "https://${BUILDCACHE_SERVER:-bincache.flatcar-linux.net}/images/${BOARD%-usr}/${FLATCAR_VERSION}/$(_dst_name ".box")",
    "checksum_type": "sha256",
    "checksum": "$(sha256sum "${box}" | awk '{print $1}')"
  }]
@@ -1271,53 +1271,6 @@ vm_cleanup() {
  sudo rm -rf "${VM_TMP_DIR}"
}

vm_upload() {

  declare -a legacy_uploads
  declare -a uploadable_files
  declare -a compressed_images
  declare -a image_files
  declare -a digest_uploads

  compress_disk_images VM_GENERATED_FILES compressed_images uploadable_files

  if [ "${#compressed_images[@]}" -gt 0 ]; then
    uploadable_files+=( "${compressed_images[@]}" )
    legacy_uploads+=( "${compressed_images[@]}" )
  fi

  local digests="$(_dst_dir)/$(_dst_name .DIGESTS)"
  upload_image -d "${digests}" "${uploadable_files[@]}"

  [[ -e "${digests}" ]] || return 0

  # Since depending on the ordering of $VM_GENERATED_FILES is brittle only
  # use it if $VM_DST_IMG isn't included in the uploaded files.
  if [ "${#legacy_uploads[@]}" -eq 0 ];then
    legacy_uploads+=( "${VM_GENERATED_FILES[0]}" )
  fi

  for legacy_upload in "${legacy_uploads[@]}";do
    local legacy_digest_file="${legacy_upload}.DIGESTS"
    [[ "${legacy_digest_file}" == "${digests}" ]] && continue

    cp "${digests}" "${legacy_digest_file}"
    digest_uploads+=( "${legacy_digest_file}" )

    if [[ -e "${digests}.asc" ]]; then
      digest_uploads+=( "${legacy_digest_file}.asc" )
      cp "${digests}.asc" "${legacy_digest_file}.asc"
    fi
  done

  if [ "${#digest_uploads[@]}" -gt 0 ];then
    legacy_uploads+=( "${digest_uploads[@]}" )
  fi

  local def_upload_path="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
  upload_files "$(_dst_name)" "${def_upload_path}" "" "${legacy_uploads[@]}"
}

print_readme() {
  local filename
  info "Files written to $(relpath "$(dirname "${VM_DST_IMG}")")"
@@ -86,8 +86,6 @@ if [[ "${FLAGS_usepkgonly}" -eq "${FLAGS_TRUE}" ]]; then
FLAGS_workon="${FLAGS_FALSE}"
fi

check_gsutil_opts

# Before we can run any tools, we need to update chroot or setup_board.
UPDATE_ARGS=( --regen_configs )
if [ "${FLAGS_usepkg}" -eq "${FLAGS_TRUE}" ]; then
@@ -333,8 +331,5 @@ eclean-$BOARD -d packages
info "Checking build root"
test_image_content "${BOARD_ROOT}"

# upload packages if enabled
upload_packages

info "Builds complete"
command_completed
@@ -45,7 +45,6 @@ create_provenance_overlay() {
}

catalyst_init "$@"
check_gsutil_opts

ROOT_OVERLAY="${TEMPDIR}/stage4-${ARCH}-$FLAGS_version-overlay"

@@ -55,13 +54,4 @@ cp "${BUILD_LIBRARY_DIR}/toolchain_util.sh" "${ROOT_OVERLAY}/tmp"
create_provenance_overlay "${ROOT_OVERLAY}"

catalyst_build

# TODO: Actually just TOOLCHAIN_PKGS and the exact dependencies should be uploaded
for board in $(get_board_list); do
  board_packages="${BINPKGS}/target/${board}"
  def_upload_path="${UPLOAD_ROOT}/boards/${board}/${FLAGS_version}"
  sign_and_upload_files "board toolchain packages" "${def_upload_path}" \
    "toolchain/" "${board_packages}"/*
done

command_completed

core_date
@@ -1,53 +0,0 @@
#!/bin/bash

# Copyright (c) 2016 The CoreOS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1

USAGE="USAGE: $0 [-v] [date flags]

This script calculates the date given the CoreOS major version or calculates
the version given a date.

Examples:

$0 -v 1000
Sun Mar 27 00:00:00 UTC 2016

$0 -v 1000 --iso-8601
2016-03-27

$0 --date 'Jun 1, 2016'
1066
"

case "$1" in
  "-h")
    echo "$USAGE"
    ;;

  "-v")
    shift
    if [[ $# -ge 1 ]] && [[ "$1" != [-+]* ]]; then
      v="$1"
      shift
    else
      v="${FLATCAR_VERSION}"
    fi

    # strip off a v prefix or .0.0 suffix
    v="${v#v}"
    v="${v%%.*}"

    export TZ=${TZ:=UTC}
    date -d @$(( (v * 86400) + COREOS_EPOCH )) "$@"
    ;;

  *)
    t=$(date +%s "$@")
    echo $(( (t - COREOS_EPOCH) / 86400 ))
    ;;
esac

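The version number here is simply a day count since the project epoch. Assuming COREOS_EPOCH=1372636800 (2013-07-01 UTC, the value common.sh presumably defines; it is consistent with the usage examples above), the arithmetic checks out:

# version 1000 -> 1372636800 + 1000 * 86400 = 1459036800
TZ=UTC date -d @1459036800   # -> Sun Mar 27 00:00:00 UTC 2016
# and the reverse direction for 'Jun 1, 2016':
echo $(( ($(TZ=UTC date -d 'Jun 1, 2016' +%s) - 1372636800) / 86400 ))   # -> 1066
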
@@ -1,71 +0,0 @@
#!/usr/bin/env bash

SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1

assert_inside_chroot

DEFINE_string data_dir "" "Directory containing downloaded release artifacts"
DEFINE_string board "" "Board to sign artifacts for"
DEFINE_string version "" "Version to sign artifacts for"
DEFINE_integer n_signatures "2" "Number of signatures this release will be signed with"
DEFINE_string output_dir "" "Output directory"
DEFINE_string gpg_key "" "Value for '--default-key' argument to gpg --sign"

FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"

set -e

data_dir="${FLAGS_data_dir}/${FLAGS_board}/${FLAGS_version}"
output_dir="${FLAGS_output_dir}/${FLAGS_board}/${FLAGS_version}"
mkdir -p "$output_dir"

cleanup() {
  # core_sign_update expects to unpack this too, so we'll clean it up.
  rm -f "${data_dir}/coreos_production_update.bin"

  rm -f "${data_dir}/update"
  rm -f "${data_dir}/update.hash"
}

trap cleanup INT TERM EXIT

# delta_generator expects a list of colon-separated sizes for signature hash algorithms in order to
# build the update payload protobuf properly. Since we already assume sha256 elsewhere in
# core_sign_update, do it here as well.
signature_sizes=""
for i in $(seq 1 $FLAGS_n_signatures); do
  signature_sizes="${signature_sizes}:256"
done
signature_sizes="${signature_sizes:1:${#signature_sizes}}"

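A quick trace of the loop above with the default --n_signatures=2:

# iteration 1: signature_sizes=":256"
# iteration 2: signature_sizes=":256:256"
# the final substring expansion drops the leading colon -> "256:256"
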
echo "=== Verifying update payload... ==="
|
||||
gpg2 --verify "${data_dir}/coreos_production_update.bin.bz2.sig"
|
||||
gpg2 --verify "${data_dir}/coreos_production_image.vmlinuz.sig"
|
||||
gpg2 --verify "${data_dir}/coreos_production_update.zip.sig"
|
||||
echo "=== Decompressing update payload... ==="
|
||||
bunzip2 --keep "${data_dir}/coreos_production_update.bin.bz2"
|
||||
|
||||
echo "=== Creating signable update payload... ==="
|
||||
delta_generator \
|
||||
-new_image "${data_dir}/coreos_production_update.bin" \
|
||||
-new_kernel "${data_dir}/coreos_production_image.vmlinuz" \
|
||||
-out_file "${data_dir}/update"
|
||||
delta_generator \
|
||||
--signature_size ${signature_sizes} \
|
||||
--in_file "${data_dir}/update" \
|
||||
--out_hash_file "${data_dir}/update.hash"
|
||||
|
||||
echo "=== Signing update payload... ==="
|
||||
if [[ -z "${FLAGS_gpg_key}" ]]; then
|
||||
gpg2 \
|
||||
--output "${output_dir}/update.sig.$(whoami)" \
|
||||
--armor --detach-sign "${data_dir}/update.hash"
|
||||
else
|
||||
gpg2 \
|
||||
--local-user "$FLAGS_gpg_key" \
|
||||
--output "${output_dir}/update.sig.$(whoami)" \
|
||||
--armor --detach-sign "${data_dir}/update.hash"
|
||||
fi
|
||||
echo "=== Update payload signed successfully. ==="
|

@@ -1,41 +0,0 @@
#!/bin/bash

# Copyright (c) 2014 The CoreOS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
# We have to simple-mindedly set GCLIENT_ROOT in case we're running from
# au-generator.zip because common.sh will fail while auto-detecting it.
export GCLIENT_ROOT=$(readlink -f "${SCRIPT_ROOT}/../../")
. "${SCRIPT_ROOT}/common.sh" || exit 1

DEFINE_string board "amd64-usr" \
  "Board type of the image"
DEFINE_string version "${FLATCAR_VERSION}" \
  "Version number to promote."

DEFINE_string build_storage "gs://builds.release.core-os.net" \
  "GS bucket with official build artifacts."
DEFINE_string release_storage "gs://alpha.release.core-os.net" \
  "GS bucket for release downloads."
DEFINE_string legacy_storage "gs://storage.core-os.net/coreos" \
  "Legacy 'storage' GS bucket."

# Parse flags
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
switch_to_strict_mode

# Ensure GS URL doesn't have a trailing /
FLAGS_build_storage="${FLAGS_build_storage%%/}"
FLAGS_release_storage="${FLAGS_release_storage%%/}"
FLAGS_legacy_storage="${FLAGS_legacy_storage%%/}"

# Full GS URLs
gs_build="${FLAGS_build_storage}/alpha/boards/${FLAGS_board}/${FLAGS_version}"
gs_release="${FLAGS_release_storage}/${FLAGS_board}/${FLAGS_version}"
gs_legacy="${FLAGS_legacy_storage}/${FLAGS_board}/${FLAGS_version}"

gsutil -m cp "${gs_build}/*" "${gs_release}/"
gsutil -m cp "${gs_release}/*" "${gs_legacy}/"

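With the default buckets and a hypothetical --board=amd64-usr --version=1000.0.0, the two copies fan out as:

# gs://builds.release.core-os.net/alpha/boards/amd64-usr/1000.0.0/*
#   -> gs://alpha.release.core-os.net/amd64-usr/1000.0.0/
#   -> gs://storage.core-os.net/coreos/amd64-usr/1000.0.0/
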
@@ -1,64 +0,0 @@
#!/usr/bin/env bash

# Copyright (c) 2013 The CoreOS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
# We have to simple-mindedly set GCLIENT_ROOT in case we're running from
# au-generator.zip because common.sh will fail while auto-detecting it.
export GCLIENT_ROOT=$(readlink -f "${SCRIPT_ROOT}/../../")
. "${SCRIPT_ROOT}/common.sh" || exit 1

DEFINE_string board "amd64-usr" \
  "Board type of the image"
DEFINE_string payload "coreos_production_update.gz" \
  "Path to the update payload"
DEFINE_string version "${FLATCAR_VERSION}" \
  "Version number of this build."
DEFINE_string app_id "e96281a6-d1af-4bde-9a0a-97b76e56dc57" \
  "CoreOS AppId in roller."

DEFINE_string user "" \
  "User for roller."
DEFINE_string api_key "" \
  "API key for roller."
DEFINE_string endpoint "https://public.update.core-os.net" \
  "Roller endpoint to update."
DEFINE_string storage "gs://coreos-update" \
  "Google Storage location to host the payload."
DEFINE_string download "https://update.release.core-os.net" \
  "HTTPS location users will download payload from."

FLAGS_HELPS="usage: $SCRIPTNAME [flags]

Setting everything up for use\n

1) Run 'gsutil config' and use project id coreos-net-storage\n
2) Ensure updateservicectl is installed in your path\n
"

# Parse flags
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
switch_to_strict_mode

[[ -z "${FLAGS_api_key}" ]] && die "--api_key is required"
[[ -z "${FLAGS_user}" ]] && die "--user is required"

# Ensure GS URL doesn't have a trailing /
FLAGS_storage="${FLAGS_storage%%/}"

GS_URL="${FLAGS_storage}/${FLAGS_board}/${FLAGS_version}/update.gz"
HTTP_URL="${FLAGS_download}/${FLAGS_board}/${FLAGS_version}/update.gz"
gsutil cp "${FLAGS_payload}" "${GS_URL}"

updateservicectl \
  --server="${FLAGS_endpoint}" \
  --user="${FLAGS_user}" \
  --key="${FLAGS_api_key}" \
  package create \
  --app-id="${FLAGS_app_id}" \
  --file="${FLAGS_payload}" \
  --url="${HTTP_URL}" \
  --version="${FLAGS_version}"

@@ -50,8 +50,6 @@ eval set -- "${FLAGS_ARGV}"
# Die on any errors.
switch_to_strict_mode

check_gsutil_opts

if [[ -z "${FLAGS_kernel_path}" && -z "${FLAGS_efi_grub_path}" &&
      -z "${FLAGS_shim_path}" ]]; then
  die_notrace "Specify at least one of --kernel_path, --efi_grub_path, --shim_path"

@@ -42,8 +42,6 @@ eval set -- "${FLAGS_ARGV}"
# Die on any errors.
switch_to_strict_mode

check_gsutil_opts

if [[ -z "${FLAGS_board}" ]] ; then
  die_notrace "--board is required."
fi

@@ -55,8 +55,6 @@ eval set -- "${FLAGS_ARGV}"
# Die on any errors.
switch_to_strict_mode

check_gsutil_opts

if [[ -z "${FLAGS_format}" ]]; then
  FLAGS_format="$(get_default_vm_type ${FLAGS_board})"
fi
@@ -128,8 +126,8 @@ write_vm_bundle
vm_cleanup
trap - EXIT

# Optionally upload all of our hard work
vm_upload
declare -a compressed_images uploadable_files
compress_disk_images VM_GENERATED_FILES

# Ready to set sail!
okboat

@@ -1 +0,0 @@
This folder is unused.

@@ -1,34 +0,0 @@
ami
ami_vmdk
azure
azure_gen2
gce
iso
pxe
qemu
qemu_uefi
brightbox
cloudsigma
cloudstack
cloudstack_vhd
digitalocean
exoscale
hyperv
niftycloud
openstack
openstack_mini
packet
parallels
rackspace
rackspace_onmetal
rackspace_vhd
vagrant
vagrant_parallels
vagrant_virtualbox
vagrant_vmware_fusion
virtualbox
vmware
vmware_insecure
vmware_ova
vmware_raw
xen

@@ -1,7 +0,0 @@
ami_vmdk
azure_gen2
openstack
openstack_mini
packet
pxe
qemu_uefi

@@ -1,169 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# since /flatcar-jenkins/developer/sdk starts with a / we only use one slash after "gs:"
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
  SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
  OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
  PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
  --verify --verify-signature --replace \
  --sdk-url-path "${SDK_URL_PATH}" \
  --json-key "${GS_DEVEL_CREDS}" \
  ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
  --manifest-branch "refs/tags/${MANIFEST_TAG}" \
  --manifest-name "${MANIFEST_NAME}" \
  --manifest-url "${MANIFEST_URL}" \
  --sdk-url=storage.googleapis.com

# Clear out old images.
sudo rm -rf chroot/build src/build torcx

enter() {
  local verify_key=
  # Run in a subshell to clean some gangue files on exit without
  # possibly clobbering the global EXIT trap.
  (
    trap 'sudo rm -f chroot/etc/portage/gangue.*' EXIT
    [ -s verify.asc ] &&
      sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
      verify_key=--verify-key=/etc/portage/gangue.asc
    sudo ln -f "${GS_DEVEL_CREDS}" chroot/etc/portage/gangue.json
    bin/cork enter --bind-gpg-agent=false -- env \
      FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \
      FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
      {FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
        --json-key=/etc/portage/gangue.json $verify_key \
        "'"${URI}" "${DISTDIR}/${FILE}"' \
      "$@"
  )
}

script() {
  enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

script update_chroot \
  --toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"

script setup_board \
  --board="${BOARD}" \
  --getbinpkgver="${FLATCAR_VERSION}" \
  --regen_configs_only

if [ "x${COREOS_OFFICIAL}" == x1 ]
then
  script set_official --board="${BOARD}" --official
else
  script set_official --board="${BOARD}" --noofficial
fi

# Retrieve this version's torcx manifest
mkdir -p torcx/pkgs
enter gsutil cp -r \
  "${DOWNLOAD_ROOT}/torcx/manifests/${BOARD}/${FLATCAR_VERSION}/torcx_manifest.json"{,.sig} \
  /mnt/host/source/torcx/
gpg --verify torcx/torcx_manifest.json.sig

BASH_SYNTAX_ERROR_WORKAROUND=$(mktemp)
exec {keep_open}<>"${BASH_SYNTAX_ERROR_WORKAROUND}"
rm "${BASH_SYNTAX_ERROR_WORKAROUND}"
jq -r '.value.packages[] | . as $p | .name as $n | $p.versions[] | [.casDigest, .hash] | join(" ") | [$n, .] | join(" ")' "torcx/torcx_manifest.json" > "/proc/$$/fd/${keep_open}"
# Download all cas references from the manifest and verify their checksums
# TODO: technically we can skip ones that don't have a 'path' since they're not
# included in the image.
while read name digest hash
do
  mkdir -p "torcx/pkgs/${BOARD}/${name}/${digest}"
  enter gsutil cp -r "${TORCX_PKG_DOWNLOAD_ROOT}/pkgs/${BOARD}/${name}/${digest}" \
    "/mnt/host/source/torcx/pkgs/${BOARD}/${name}/"
  downloaded_hash=$(sha512sum "torcx/pkgs/${BOARD}/${name}/${digest}/"*.torcx.tgz | awk '{print $1}')
  if [[ "sha512-${downloaded_hash}" != "${hash}" ]]
  then
    echo "Torcx package had wrong hash: ${downloaded_hash} instead of ${hash}"
    exit 1
  fi
done < "/proc/$$/fd/${keep_open}"
# This was "done < <(jq ...)" but it suddenly gave a syntax error with bash 4 when run with systemd-run-wrap.sh

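For reference, the construct the workaround replaces is an ordinary process substitution feeding the loop; the temp-file-and-exec dance exists only to sidestep a parser problem seen on one bash build. The equivalent form (jq filter elided, see above):

while read -r name digest hash; do
  echo "verify ${name} ${digest} ${hash}"
done < <(jq -r '...' torcx/torcx_manifest.json)   # same data flow, no temp fd
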
script build_image \
  --board="${BOARD}" \
  --group="${GROUP}" \
  --getbinpkg \
  --getbinpkgver="${FLATCAR_VERSION}" \
  --sign="${SIGNING_USER}" \
  --sign_digests="${SIGNING_USER}" \
  --torcx_manifest=/mnt/host/source/torcx/torcx_manifest.json \
  --torcx_root=/mnt/host/source/torcx/ \
  --upload_root="${UPLOAD_ROOT}" \
  --upload prodtar container

set +x
# Don't fail the whole job
set +e
echo "==================================================================="
echo
export BOARD_A="${BOARD}"
export BOARD_B="${BOARD}"
if [ "${GROUP}" != "developer" ]; then
  export CHANNEL_A="${GROUP}"
else
  export CHANNEL_A="${CHANNEL_BASE}"
fi

export VERSION_A=$(curl -s -S -f -L "https://${CHANNEL_A}.release.flatcar-linux.net/${BOARD}/current/version.txt" | grep -m 1 "FLATCAR_VERSION=" | cut -d "=" -f 2)

if [ "${GROUP}" = "developer" ]; then
  export CHANNEL_B="developer"
  export MODE_B="/developer/"
else
  export CHANNEL_B="${GROUP}"
fi
echo "Image differences compared to ${CHANNEL_A} ${VERSION_A}:"
rm -f package-diff
curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 "https://raw.githubusercontent.com/flatcar-linux/flatcar-build-scripts/master/package-diff"
chmod +x package-diff
echo "Package updates, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_packages.txt ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
echo "Image file changes, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_contents.txt FILESONLY=1 CUTKERNEL=1 ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
echo "Image kernel config changes, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_kernel_config.txt ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
echo "Image file size change (includes /boot, /usr and the default rootfs partitions), compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_contents.txt CALCSIZE=1 ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
BASE_PATH="https://bucket.release.flatcar-linux.net/$(echo $UPLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
echo "Image URL: ${BASE_PATH}/flatcar_production_image.bin.bz2"

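The sed 's|gs://||g' merely strips the URL scheme so the bucket path can be appended to the public HTTPS mirror; with a hypothetical UPLOAD_ROOT=gs://flatcar-jenkins/developer:

# BASE_PATH -> https://bucket.release.flatcar-linux.net/flatcar-jenkins/developer/boards/${BOARD}/${FLATCAR_VERSION}
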
@@ -1,4 +0,0 @@
FROM debian:11

RUN apt-get update && \
    apt-get install -y qemu-system-aarch64 qemu-efi-aarch64 lbzip2 sudo dnsmasq gnupg2 git curl iptables

@@ -1,93 +0,0 @@
#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

NAME="jenkins-${JOB_NAME##*/}-${BUILD_NUMBER}"

if [[ "${AWS_INSTANCE_TYPE}" != "" ]]; then
  instance_type="${AWS_INSTANCE_TYPE}"
elif [[ "${BOARD}" == "arm64-usr" ]]; then
  instance_type="a1.large"
elif [[ "${BOARD}" == "amd64-usr" ]]; then
  instance_type="t3.small"
fi

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
  OFFER="basic"
fi

# Append the offer as oem suffix.
if [[ "${OFFER}" != "basic" ]]; then
  OEM_SUFFIX="_${OFFER}"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
  KOLA_TESTS="*"
fi

if [[ "${AWS_AMI_ID}" == "" ]]; then
  [ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=
  mkdir -p tmp
  bin/cork download-image \
    --cache-dir=tmp \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --platform="aws${OEM_SUFFIX}" \
    --root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
    --sanity-check=false --verify=true $verify_key
  bunzip2 "tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk.bz2"
  BUCKET="flatcar-kola-ami-import-${AWS_REGION}"
  trap 'bin/ore -d aws delete --region="${AWS_REGION}" --name="${NAME}" --ami-name="${NAME}" --file="tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk" --bucket "s3://${BUCKET}/${BOARD}/"; rm -r tmp/' EXIT
  bin/ore aws initialize --region="${AWS_REGION}" --bucket "${BUCKET}"
  AWS_AMI_ID=$(bin/ore aws upload --force --region="${AWS_REGION}" --name=${NAME} --ami-name="${NAME}" --ami-description="Flatcar Test ${NAME}" --file="tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk" --bucket "s3://${BUCKET}/${BOARD}/" | jq -r .HVM)
  echo "Created new AMI ${AWS_AMI_ID} (will be removed after testing)"
fi

# Run the cl.internet test on multiple machine types only if it should run in general
cl_internet_included="$(set -o noglob; bin/kola list --platform=aws --filter ${KOLA_TESTS} | { grep cl.internet || true ; } )"
if [[ "${BOARD}" == "amd64-usr" ]] && [[ "${cl_internet_included}" != "" ]]; then
  for INSTANCE in m4.2xlarge; do
    (
      set +x
      OUTPUT=$(timeout --signal=SIGQUIT 6h bin/kola run \
        --parallel=8 \
        --basename="${NAME}" \
        --board="${BOARD}" \
        --aws-ami="${AWS_AMI_ID}" \
        --aws-region="${AWS_REGION}" \
        --aws-type="${INSTANCE}" \
        --aws-iam-profile="${AWS_IAM_PROFILE}" \
        --platform=aws \
        --channel="${GROUP}" \
        --offering="${OFFER}" \
        --tapfile="${JOB_NAME##*/}_validate_${INSTANCE}.tap" \
        --torcx-manifest=torcx_manifest.json \
        cl.internet 2>&1 || true)
      echo "=== START $INSTANCE ==="
      echo "${OUTPUT}" | sed "s/^/${INSTANCE}: /g"
      echo "=== END $INSTANCE ==="
    ) &
  done
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 6h bin/kola run \
  --parallel=8 \
  --basename="${NAME}" \
  --board="${BOARD}" \
  --aws-ami="${AWS_AMI_ID}" \
  --aws-region="${AWS_REGION}" \
  --aws-type="${instance_type}" \
  --aws-iam-profile="${AWS_IAM_PROFILE}" \
  --platform=aws \
  --channel="${GROUP}" \
  --offering="${OFFER}" \
  --tapfile="${JOB_NAME##*/}.tap" \
  --torcx-manifest=torcx_manifest.json \
  ${KOLA_TESTS}
set +o noglob

# wait for the cl.internet test results
wait

@@ -1,55 +0,0 @@
#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

NAME="jenkins-${JOB_NAME##*/}-${BUILD_NUMBER}"

if [[ "${BOARD}" == "arm64-usr" ]]; then
  if [[ "${AZURE_HYPER_V_GENERATION}" != "V2" ]]; then
    echo "Unsupported combination"
    exit 1
  fi
  AZURE_USE_GALLERY="--azure-use-gallery"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
  KOLA_TESTS="*"
fi

if [[ "${AZURE_MACHINE_SIZE}" != "" ]]; then
  AZURE_MACHINE_SIZE_OPT="--azure-size=${AZURE_MACHINE_SIZE}"
fi

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
  OFFER="basic"
fi

if [ "${BLOB_URL}" = "" ]; then
  exit 0
fi

# Do not expand the kola test patterns globs
set -o noglob
# Align timeout with ore azure gc --duration parameter
timeout --signal=SIGQUIT 6h bin/kola run \
  --parallel="${PARALLEL}" \
  --basename="${NAME}" \
  --board="${BOARD}" \
  --channel="${GROUP}" \
  --platform=azure \
  --offering="${OFFER}" \
  --azure-blob-url="${BLOB_URL}" \
  --azure-location="${LOCATION}" \
  --azure-profile="${AZURE_CREDENTIALS}" \
  --azure-auth="${AZURE_AUTH_CREDENTIALS}" \
  --tapfile="${JOB_NAME##*/}.tap" \
  --torcx-manifest=torcx_manifest.json \
  ${AZURE_USE_GALLERY} \
  ${AZURE_MACHINE_SIZE_OPT} \
  ${AZURE_HYPER_V_GENERATION:+--azure-hyper-v-generation=${AZURE_HYPER_V_GENERATION}} \
  ${AZURE_VNET_SUBNET_NAME:+--azure-vnet-subnet-name=${AZURE_VNET_SUBNET_NAME}} \
  ${AZURE_USE_PRIVATE_IPS:+--azure-use-private-ips=${AZURE_USE_PRIVATE_IPS}} \
  ${KOLA_TESTS}
set +o noglob

@@ -1,47 +0,0 @@
#!/bin/bash
set -ex

sudo rm -f flatcar_developer_container.bin*
trap 'sudo rm -f flatcar_developer_container.bin*' EXIT

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/gangue get \
  --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
  --verify=true $verify_key \
  "${DOWNLOAD_ROOT}/boards/${BOARD}/${VERSION}/flatcar_production_image_kernel_config.txt"

bin/gangue get \
  --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
  --verify=true $verify_key \
  "${DOWNLOAD_ROOT}/boards/${BOARD}/${VERSION}/flatcar_developer_container.bin.bz2"
bunzip2 flatcar_developer_container.bin.bz2

if [[ "$(systemd-nspawn --version | grep 'systemd 241')" = "" ]]
then
  PIPEARG="--pipe"
else
  # TODO: Remove this case once Flatcar >=2592 is used on all nodes
  PIPEARG=""
fi

sudo systemd-nspawn $PIPEARG \
  --setenv=PORTAGE_BINHOST="${PORTAGE_BINHOST}" \
  --bind-ro=/lib/modules \
  --bind-ro="$PWD/flatcar_production_image_kernel_config.txt:/boot/config" \
  --bind-ro="${GOOGLE_APPLICATION_CREDENTIALS}:/opt/credentials.json" \
  --bind-ro="$PWD/verify.asc:/opt/verify.asc" \
  --bind-ro="$PWD/bin/gangue:/opt/bin/gangue" \
  --image=flatcar_developer_container.bin \
  --machine=flatcar-developer-container-$(uuidgen) \
  --tmpfs=/usr/src \
  --tmpfs=/var/tmp \
  /bin/bash -eux << 'EOF'
export PORTAGE_BINHOST="${PORTAGE_BINHOST}"
export {FETCH,RESUME}COMMAND_GS="/opt/bin/gangue get --json-key=/opt/credentials.json --verify=true /opt/verify.asc \"\${URI}\" \"\${DISTDIR}/\${FILE}\""
emerge-gitclone
. /usr/share/coreos/release
emerge -gv coreos-sources
ln -fns /boot/config /usr/src/linux/.config
exec make -C /usr/src/linux -j"$(nproc)" modules_prepare V=1
EOF

@@ -1,47 +0,0 @@
#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

set -o pipefail

if [[ "${DOWNLOAD_ROOT}" == gs://flatcar-jenkins-private/* ]]; then
  echo "Fetching google/cloud-sdk"
  docker pull google/cloud-sdk > /dev/null
  BUCKET_PATH="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}/flatcar_production_digitalocean_image.bin.bz2"
  IMAGE_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH} | grep -o 'https.*'")"
else
  BASE_URL="https://bucket.release.flatcar-linux.net/$(echo $DOWNLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
  IMAGE_URL="${BASE_URL}/flatcar_production_digitalocean_image.bin.bz2"
fi

bin/ore do create-image \
  --config-file="${DIGITALOCEAN_CREDS}" \
  --region="${DO_REGION}" \
  --name="${NAME}" \
  --url="${IMAGE_URL}"

trap 'bin/ore do delete-image \
  --name="${NAME}" \
  --config-file="${DIGITALOCEAN_CREDS}"' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
  KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 4h bin/kola run \
  --do-size=${DO_MACHINE_SIZE} \
  --do-region=${DO_REGION} \
  --basename="${NAME}" \
  --do-config-file="${DIGITALOCEAN_CREDS}" \
  --do-image="${NAME}" \
  --parallel=8 \
  --platform=do \
  --channel="${GROUP}" \
  --tapfile="${JOB_NAME##*/}.tap" \
  --torcx-manifest=torcx_manifest.json \
  ${KOLA_TESTS}
set +o noglob

@@ -1,52 +0,0 @@
#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
  OFFER="basic"
fi

# Append the offer as oem suffix.
if [[ "${OFFER}" != "basic" ]]; then
  OEM_SUFFIX="_${OFFER}"
fi

# Create a name that includes the OFFER,
# but replace _ with -, as gcloud doesn't like it otherwise.
OEMNAME="${OFFER}-${BUILD_NUMBER}"
NAME=${OEMNAME//_/-}

bin/ore gcloud create-image \
  --board="${BOARD}" \
  --family="${NAME}" \
  --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
  --source-root="${DOWNLOAD_ROOT}/boards" \
  --source-name=flatcar_production_gce${OEM_SUFFIX}.tar.gz \
  --version="${FLATCAR_VERSION}"

GCE_NAME="${NAME//[+.]/-}-${FLATCAR_VERSION//[+.]/-}"

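The pattern substitutions sanitize the name for GCE image naming, which rejects '_', '+', and '.'; with hypothetical values:

# OFFER=basic, BUILD_NUMBER=123, FLATCAR_VERSION=3033.2.0
# OEMNAME="basic-123" -> NAME="basic-123"
# GCE_NAME="basic-123-3033-2-0"
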
trap 'bin/ore gcloud delete-images \
  --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
  "${GCE_NAME}"' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
  KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 6h bin/kola run \
  --basename="${NAME}" \
  --gce-image="${GCE_NAME}" \
  --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
  --gce-machinetype="${GCE_MACHINE_TYPE}" \
  --parallel=4 \
  --platform=gce \
  --channel="${GROUP}" \
  --tapfile="${JOB_NAME##*/}.tap" \
  --torcx-manifest=torcx_manifest.json \
  ${KOLA_TESTS}
set +o noglob
@ -1,95 +0,0 @@
#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

# same as the GC timeout
timeout=6h

set -o pipefail

# Construct the URLs of the image to be used during tests.
if [[ "${DOWNLOAD_ROOT}" == gs://flatcar-jenkins-private/* ]]; then
    echo "Fetching google/cloud-sdk"
    docker pull google/cloud-sdk > /dev/null
    BUCKET_PATH="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
    IMAGE_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_packet_image.bin.bz2 | grep -o 'https.*'")"
    KERNEL_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_pxe.vmlinuz | grep -o 'https.*'")"
    CPIO_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_pxe_image.cpio.gz | grep -o 'https.*'")"
else
    BASE_PATH="bucket.release.flatcar-linux.net/$(echo $DOWNLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
    IMAGE_URL="https://${BASE_PATH}/flatcar_production_packet_image.bin.bz2"
    KERNEL_URL="https://${BASE_PATH}/flatcar_production_pxe.vmlinuz"
    CPIO_URL="https://${BASE_PATH}/flatcar_production_pxe_image.cpio.gz"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Equinix Metal ARM servers are not yet hourly available in the default `sv15` region,
# so we override the `PACKET_REGION` to `DC`. ARM servers are available in either the
# DA (Dallas) or DC (Washington) metro, but DC has more servers available.
# See also https://metal.equinix.com/developers/docs/locations/capacity/.
# We do not override `PACKET_REGION` for both boards at the top level because we need
# to keep proximity for PXE booting.
if [[ "${BOARD}" == "arm64-usr" ]]; then
    PACKET_REGION="DC"
fi

# Run the cl.internet test on multiple machine types only if it should run in general
cl_internet_included="$(set -o noglob; bin/kola list --platform=packet --filter ${KOLA_TESTS} | { grep cl.internet || true ; } )"
if [[ "${BOARD}" == "amd64-usr" ]] && [[ "${cl_internet_included}" != "" ]]; then
    for INSTANCE in m3.small.x86 c3.medium.x86 m3.large.x86 s3.xlarge.x86 n2.xlarge.x86; do
        (
            set +x
            OUTPUT=$(timeout --signal=SIGQUIT "${timeout}" bin/kola run \
                --basename="${NAME}" \
                --board="${BOARD}" \
                --channel="${GROUP}" \
                --gce-json-key="${UPLOAD_CREDS}" \
                --packet-api-key="${PACKET_API_KEY}" \
                --packet-image-url="${IMAGE_URL}" \
                --packet-installer-image-kernel-url="${KERNEL_URL}" \
                --packet-installer-image-cpio-url="${CPIO_URL}" \
                --packet-project="${PACKET_PROJECT}" \
                --packet-storage-url="${UPLOAD_ROOT}/mantle/packet" \
                --packet-plan="${INSTANCE}" \
                --equinixmetal-metro="${PACKET_REGION}" \
                --parallel="${PARALLEL_TESTS}" \
                --platform=packet \
                --tapfile="${JOB_NAME##*/}_validate_${INSTANCE}.tap" \
                --torcx-manifest=torcx_manifest.json \
                cl.internet 2>&1 || true)
            echo "=== START $INSTANCE ==="
            echo "${OUTPUT}" | sed "s/^/${INSTANCE}: /g"
            echo "=== END $INSTANCE ==="
        ) &
    done
fi

# Do not expand the kola test pattern globs
set -o noglob
timeout --signal=SIGQUIT "${timeout}" bin/kola run \
    --basename="${NAME}" \
    --board="${BOARD}" \
    --channel="${GROUP}" \
    --gce-json-key="${UPLOAD_CREDS}" \
    --packet-api-key="${PACKET_API_KEY}" \
    --packet-image-url="${IMAGE_URL}" \
    --packet-installer-image-kernel-url="${KERNEL_URL}" \
    --packet-installer-image-cpio-url="${CPIO_URL}" \
    --packet-project="${PACKET_PROJECT}" \
    --packet-storage-url="${UPLOAD_ROOT}/mantle/packet" \
    --packet-plan="${PACKET_MACHINE_TYPE}" \
    --equinixmetal-metro="${PACKET_REGION}" \
    --parallel="${PARALLEL_TESTS}" \
    --platform=packet \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

# wait for the cl.internet test results
wait
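
The per-instance cl.internet runs above rely on a shell fan-out: each machine type gets a backgrounded subshell whose captured output is printed with a prefix, and the trailing wait collects all of them. A minimal runnable sketch of the same pattern (the instance names here are placeholders):

#!/bin/bash
for INSTANCE in type-a type-b type-c; do
    (
        # Capture the whole run so output from parallel jobs doesn't interleave.
        OUTPUT=$(echo "result for ${INSTANCE}" 2>&1 || true)
        echo "=== START ${INSTANCE} ==="
        echo "${OUTPUT}" | sed "s/^/${INSTANCE}: /g"
        echo "=== END ${INSTANCE} ==="
    ) &
done
wait   # block until every backgrounded subshell has finished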
@ -1,5 +0,0 @@
#!/bin/bash
set -ex

SCRIPTFOLDER="$(dirname "$(readlink -f "$0")")"
"${SCRIPTFOLDER}/qemu_common.sh" qemu
@ -1,151 +0,0 @@
#!/bin/bash
set -ex

PLATFORM="$1"
if [ "${PLATFORM}" = qemu ]; then
    TIMEOUT="12h"
    BIOS="bios-256k.bin"
elif [ "${PLATFORM}" = qemu_uefi ]; then
    TIMEOUT="14h"
    BIOS="/mnt/host/source/tmp/flatcar_production_qemu_uefi_efi_code.qcow2"
else
    echo "Unknown platform: \"${PLATFORM}\""
    exit 1
fi

native_arm64() {
    [[ "${NATIVE_ARM64}" == true ]]
}

sudo rm -rf *.tap src/scripts/_kola_temp tmp _kola_temp* _tmp

if native_arm64 ; then
    # for kola reflinking
    sudo rm -rf /var/tmp
    mkdir -p _tmp
    chmod 1777 _tmp
    ln -s "$PWD/_tmp" /var/tmp
    # use arm64 mantle bins
    rm -rf bin
    mv bin.arm64 bin
    # simulate SDK folder structure
    mkdir -p src
    ln -s .. src/scripts
    sudo rm -f chroot
    ln -s / chroot

    enter() {
        "$@"
    }
else
    enter() {
        bin/cork enter --bind-gpg-agent=false -- "$@"
    }
fi

script() {
    enter "/mnt/host/source/src/scripts/$@"
}

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically, making further
# private key imports fail; create it here as a workaround.
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# Since /flatcar-jenkins/developer/sdk starts with a "/", we only use one here.
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

if native_arm64 ; then
    mkdir -p .repo/
    if [ ! -e .repo/manifests ]; then
        mkdir -p ~/.ssh
        ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
        git clone "${MANIFEST_URL}" .repo/manifests
    fi
    git -C .repo/manifests tag -v "${MANIFEST_TAG}"
    git -C .repo/manifests checkout "${MANIFEST_TAG}"
else
    bin/cork create \
        --verify --verify-signature --replace \
        --sdk-url-path "${SDK_URL_PATH}" \
        --json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
        --manifest-branch "refs/tags/${MANIFEST_TAG}" \
        --manifest-name "${MANIFEST_NAME}" \
        --sdk-url storage.googleapis.com \
        --manifest-url "${MANIFEST_URL}"
fi

source .repo/manifests/version.txt

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

if ! native_arm64; then
    script update_chroot \
        --toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
fi

mkdir -p tmp
bin/cork download-image \
    --cache-dir=tmp \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --platform="${PLATFORM}" \
    --root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
    --verify=true $verify_key
enter lbunzip2 -k -f /mnt/host/source/tmp/flatcar_production_image.bin.bz2

# create folders to handle the case where arm64 is missing
sudo mkdir -p chroot/usr/lib/kola/{arm64,amd64}
# copy all of the latest mantle binaries into the chroot
sudo cp -t chroot/usr/lib/kola/arm64 bin/arm64/*
sudo cp -t chroot/usr/lib/kola/amd64 bin/amd64/*
sudo cp -t chroot/usr/bin bin/[b-z]*

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

rm -f flatcar_test_update.gz
bin/gangue get \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --verify=true $verify_key \
    "${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}/flatcar_test_update.gz"
mv flatcar_test_update.gz tmp/

if [ "${KOLA_TESTS}" = "*" ] || [ "$(echo "${KOLA_TESTS}" | grep 'cl.update.payload')" != "" ]; then
    # First test the update from the previous release; this is done before running the
    # real kola suite so that the qemu-latest symlink still points to the full run.
    rm -f flatcar_production_image.bin.bz2
    curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 "https://${GROUP}.release.flatcar-linux.net/${BOARD}/current/flatcar_production_image.bin.bz2"
    mv flatcar_production_image.bin.bz2 tmp/flatcar_production_image_previous.bin.bz2
    enter lbunzip2 -k -f /mnt/host/source/tmp/flatcar_production_image_previous.bin.bz2
    enter sudo timeout --signal=SIGQUIT "${TIMEOUT}" kola run \
        --board="${BOARD}" \
        --channel="${GROUP}" \
        --parallel="${PARALLEL}" \
        --platform=qemu \
        --qemu-bios="${BIOS}" \
        --qemu-image=/mnt/host/source/tmp/flatcar_production_image_previous.bin \
        --tapfile="/mnt/host/source/${JOB_NAME##*/}_update_from_previous_release.tap" \
        --torcx-manifest=/mnt/host/source/torcx_manifest.json \
        --update-payload=/mnt/host/source/tmp/flatcar_test_update.gz \
        cl.update.payload || true
fi

# Do not expand the kola test pattern globs
set -o noglob
enter sudo timeout --signal=SIGQUIT "${TIMEOUT}" kola run \
    --board="${BOARD}" \
    --channel="${GROUP}" \
    --parallel="${PARALLEL}" \
    --platform=qemu \
    --qemu-bios="${BIOS}" \
    --qemu-image=/mnt/host/source/tmp/flatcar_production_image.bin \
    --tapfile="/mnt/host/source/${JOB_NAME##*/}.tap" \
    --torcx-manifest=/mnt/host/source/torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

sudo rm -rf tmp
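
The enter()/script() indirection is what lets the same script body drive both environments: on a native arm64 host commands run directly, otherwise they run inside the SDK chroot via cork. A stripped-down sketch of the idea (the cork invocation mirrors the script above and is assumed, not tested here):

#!/bin/bash
if [[ "${NATIVE_ARM64}" == true ]]; then
    enter() { "$@"; }                                            # run on the host
else
    enter() { bin/cork enter --bind-gpg-agent=false -- "$@"; }   # run inside the chroot
fi
enter uname -m   # identical call site, different execution environment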
@ -1,9 +0,0 @@
#!/bin/bash
set -ex

SCRIPTFOLDER="$(dirname "$(readlink -f "$0")")"
if [[ "$NATIVE_ARM64" == true ]]; then
    "${SCRIPTFOLDER}/qemu_uefi_arm64.sh" qemu_uefi
else
    "${SCRIPTFOLDER}/qemu_common.sh" qemu_uefi
fi
@ -1,40 +0,0 @@
#!/bin/bash

set -ex

SCRIPTFOLDER="$(dirname "$(readlink -f "$0")")"
# strip the $PWD prefix so that we can access the path relative to the container working directory
SCRIPTFOLDER=${SCRIPTFOLDER#$PWD/}

DOCKER_IMG=ghcr.io/kinvolk/kola-test-runner:latest

envarg=()
envflags=(
    SSH_AUTH_SOCK
    BOARD
    MANIFEST_URL
    SDK_URL_PATH
    CHANNEL_BASE
    GROUP
    KOLA_TESTS
    MANIFEST_TAG
    DOWNLOAD_ROOT
    PARALLEL
    GOOGLE_APPLICATION_CREDENTIALS
    NATIVE_ARM64
)
for envvar in "${envflags[@]}"; do
    envarg+=( -e "${envvar}=${!envvar}" )
done

docker pull ${DOCKER_IMG}
exec docker run --privileged \
    --rm \
    -v /dev:/dev \
    -w /mnt/host/source \
    -v ${PWD}:/mnt/host/source \
    -v ${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS} \
    ${SSH_AUTH_SOCK:+-v ${SSH_AUTH_SOCK}:${SSH_AUTH_SOCK}} \
    "${envarg[@]}" \
    ${DOCKER_IMG} \
    "${SCRIPTFOLDER}/qemu_common.sh" qemu_uefi
@ -1,40 +0,0 @@
#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

mkdir -p tmp
bin/cork download-image \
    --cache-dir=tmp \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --platform=esx \
    --root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
    --verify=true $verify_key

trap 'bin/ore esx --esx-config-file "${VMWARE_ESX_CREDS}" remove-vms \
    --pattern "${NAME}*" || true' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Delete every VM that is running because we'll use all available spots
bin/ore esx --esx-config-file "${VMWARE_ESX_CREDS}" remove-vms || true

# Do not expand the kola test pattern globs
set -o noglob
timeout --signal=SIGQUIT 2h bin/kola run \
    --basename="${NAME}" \
    --esx-config-file "${VMWARE_ESX_CREDS}" \
    --esx-ova-path tmp/flatcar_production_vmware_ova.ova \
    --parallel=4 \
    --platform=esx \
    --channel="${GROUP}" \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob
sudo rm -rf tmp
@ -1,149 +0,0 @@
#!/bin/bash
set -ex
BASE=$(dirname $(readlink -f "$0"))
git -C manifest config user.name "${GIT_AUTHOR_NAME}"
git -C manifest config user.email "${GIT_AUTHOR_EMAIL}"

COREOS_OFFICIAL=0

finish() {
    local tag="$1"
    git -C manifest tag -v "${tag}"
    git -C manifest push "${BUILDS_PUSH_URL}" "refs/tags/${tag}:refs/tags/${tag}"
    tee manifest.properties << EOF
MANIFEST_URL = ${BUILDS_CLONE_URL}
MANIFEST_REF = refs/tags/${tag}
MANIFEST_NAME = release.xml
COREOS_OFFICIAL = ${COREOS_OFFICIAL:-0}
EOF
}

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically, making further
# private key imports fail; create it here as a workaround.
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# Branches are of the form remote-name/branch-name. Tags are just tag-name.
# If we have a release tag use it; for branches we need to make a tag.
if [[ "${GIT_BRANCH}" != */* ]]
then
    COREOS_OFFICIAL=1
    finish "${GIT_BRANCH}"
    exit
fi

MANIFEST_BRANCH="${GIT_BRANCH##*/}"
MANIFEST_ID="${MANIFEST_BRANCH}"
# Nightly builds use the "default" manifest from flatcar-master and have the same
# scripts/overlay/portage branches without a "user/" prefix.
# No further exclusions are made because nothing bad happens if other branches are used.
if [[ "${MANIFEST_NAME}" = default ]] && [[ "${MANIFEST_BRANCH}" = flatcar-master ]] && \
   [[ "${SCRIPTS_REF}" = "${OVERLAY_REF}" ]] && [[ "${OVERLAY_REF}" = "${PORTAGE_REF}" ]] && \
   [[ "${SCRIPTS_REF}" != */* ]] && [[ "${SCRIPTS_REF}" != "" ]]
then
    # Use SCRIPTS_REF, but the others also work since they have the same value
    MANIFEST_ID="${SCRIPTS_REF}-nightly"
fi

MANIFEST_NAME="${MANIFEST_NAME}.xml"
# Abort (set -e) if the manifest does not exist.
[[ -f "manifest/${MANIFEST_NAME}" ]]

source manifest/version.txt

if [[ "${SDK_VERSION}" == sdk-*-nightly ]]
then
    # Get the SDK version from GCS - we use gsutil to get access to the bucket since it's private.
    SDK_VERSION=$(docker run --rm -v "${GOOGLE_APPLICATION_CREDENTIALS}:/opt/release.json:ro" google/cloud-sdk:alpine bash -c "gcloud auth activate-service-account --key-file /opt/release.json && gsutil cat gs://flatcar-jenkins/developer/sdk/amd64/${SDK_VERSION}.txt" | tee /dev/stderr)
    if [[ -z "${SDK_VERSION}" ]]
    then
        echo "No SDK found, retrigger the manifest job with default SDK_VERSION and SDK_URL_PATH values."
        exit 1
    fi
fi

export FLATCAR_BUILD_ID="${BUILD_ID_PREFIX}${MANIFEST_ID}-${BUILD_NUMBER}"
# Nightlies and dev builds have the current date as the Flatcar version
if [[ "${MANIFEST_BRANCH}" = flatcar-master ]]
then
    FLATCAR_VERSION_ID="$(date '+%Y.%m.%d')"
fi

if [[ "${SDK_VERSION}" = sdk-new ]]
then
    # Use the version of the current developer build for DOWNSTREAM=all(-full); requires a seed SDK to be set.
    # (Releases use git tags, where none of this code is executed, because the manifest
    # and version.txt should not be modified; the Alpha release version.txt has to refer to
    # the release to be built for its SDK version.)
    SDK_VERSION="${FLATCAR_VERSION_ID}+${FLATCAR_BUILD_ID}"
fi

if [[ -n "${SDK_VERSION}" ]]
then
    export FLATCAR_SDK_VERSION="${SDK_VERSION}"
fi

# Ensure that each XML tag occupies exactly one line by first removing all line breaks
# and then adding a line break after each tag.
# This way set_manifest_ref can find the right tag by matching for "/$reponame".
cat manifest/"${MANIFEST_NAME}" | tr '\n' ' ' | sed 's#/>#/>\n#g' > "manifest/${FLATCAR_BUILD_ID}.xml"

set_manifest_ref() {
    local reponame="$1"
    local reference="$2"
    # Select lines with "/$reponame" (kept as first group) and "revision" (kept as second group)
    # and replace the value of "revision" (third group, not kept) with the new reference.
    sed -i -E "s#(/$reponame.*)(revision=\")([^\"]*)#\1\2$reference#g" "manifest/${FLATCAR_BUILD_ID}.xml"
}

setup_manifest_ref() {
    local reponame="${1}"
    local ref="${2}"
    local full_ref="refs/heads/${ref}"

    # A ref consisting only of digits is a PR number.
    if [[ -z "${ref//[0-9]}" ]]; then
        full_ref="refs/pull/${ref}/head"
    fi
    set_manifest_ref "${reponame}" "${full_ref}"
    "${BASE}/post-github-status.sh" --repo "flatcar-linux/${reponame}" --ref "${full_ref}" --status pending
}

if [[ -n "${SCRIPTS_REF}" ]]
then
    setup_manifest_ref scripts "${SCRIPTS_REF}"
fi
if [[ -n "${OVERLAY_REF}" ]]
then
    setup_manifest_ref coreos-overlay "${OVERLAY_REF}"
fi
if [[ -n "${PORTAGE_REF}" ]]
then
    setup_manifest_ref portage-stable "${PORTAGE_REF}"
fi

ln -fns "${FLATCAR_BUILD_ID}.xml" manifest/default.xml
ln -fns "${FLATCAR_BUILD_ID}.xml" manifest/release.xml

tee manifest/version.txt << EOF
FLATCAR_VERSION=${FLATCAR_VERSION_ID}+${FLATCAR_BUILD_ID}
FLATCAR_VERSION_ID=${FLATCAR_VERSION_ID}
FLATCAR_BUILD_ID=${FLATCAR_BUILD_ID}
FLATCAR_SDK_VERSION=${FLATCAR_SDK_VERSION}
EOF
# Note: You have to keep FLATCAR_VERSION in sync with the value used in the "sdk-new" case.

# Set up GPG for signing tags.
gpg --import "${GPG_SECRET_KEY_FILE}"

# Tag a development build manifest.
git -C manifest add "${FLATCAR_BUILD_ID}.xml" default.xml release.xml version.txt
git -C manifest commit \
    -m "${FLATCAR_BUILD_ID}: add build manifest" \
    -m "Based on ${GIT_URL} branch ${MANIFEST_BRANCH}" \
    -m "${BUILD_URL}"
git -C manifest tag -u "${SIGNING_USER}" -m "${FLATCAR_BUILD_ID}" "${FLATCAR_BUILD_ID}"

finish "${FLATCAR_BUILD_ID}"
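
To see what the normalization plus set_manifest_ref actually does to a manifest entry, here is a self-contained sketch on a made-up two-line snippet:

#!/bin/bash
m=$(mktemp)
printf '<project name="flatcar-linux/scripts"\n  path="src/scripts" revision="refs/heads/main"/>\n' > "${m}"
# Normalize: one XML tag per line, so the sed below can anchor on "/scripts".
cat "${m}" | tr '\n' ' ' | sed 's#/>#/>\n#g' > "${m}.xml"
# Rewrite the revision attribute of the scripts project to a PR head.
sed -i -E "s#(/scripts.*)(revision=\")([^\"]*)#\1\2refs/pull/123/head#g" "${m}.xml"
cat "${m}.xml"
# -> <project name="flatcar-linux/scripts"   path="src/scripts" revision="refs/pull/123/head"/>
rm -f "${m}" "${m}.xml"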
@ -1,120 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# For developer builds that are based on a non-developer release,
# we need the DOWNLOAD_ROOT variable to be the base path, keeping the
# UPLOAD_ROOT variable as the developer path.
if [[ "${RELEASE_BASE_IS_DEV}" = "false" && "${GROUP}" = "developer" && "${RELEASE_BASE}" != "" ]]; then
    DOWNLOAD_ROOT=$(echo ${DOWNLOAD_ROOT} | sed 's,/developer,,');
fi
# Since /flatcar-jenkins/developer/sdk starts with a "/", we only use one here.
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically, making further
# private key imports fail; create it here as a workaround.
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
    SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
    OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
    PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
    --verify --verify-signature --replace \
    --sdk-url-path "${SDK_URL_PATH}" \
    --json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
    ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
    --manifest-name "${MANIFEST_NAME}" \
    --manifest-url "${MANIFEST_URL}" \
    --sdk-url=storage.googleapis.com

enter() {
    local verify_key=
    # Run in a subshell to clean some gangue files on exit without
    # possibly clobbering the global EXIT trap.
    (
        trap 'sudo rm -f chroot/etc/portage/gangue.*' EXIT
        [ -s verify.asc ] &&
        sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
        verify_key=--verify-key=/etc/portage/gangue.asc
        sudo ln -f "${GOOGLE_APPLICATION_CREDENTIALS}" \
            chroot/etc/portage/gangue.json
        bin/cork enter --bind-gpg-agent=false -- env \
            FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \
            FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
            {FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
            --json-key=/etc/portage/gangue.json $verify_key \
            "'"${URI}" "${DISTDIR}/${FILE}"' \
            "$@"
    )
}

script() {
    enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

script update_chroot \
    --toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"

# Note: with RELEASE_BASE unset, the unquoted default expands to two arguments,
# the version and --toolchainpkgonly (same for build_packages below).
script setup_board \
    --board="${BOARD}" \
    --getbinpkgver=${RELEASE_BASE:-"${FLATCAR_VERSION}" --toolchainpkgonly} \
    --skip_chroot_upgrade \
    --force

script build_packages \
    --board="${BOARD}" \
    --getbinpkgver=${RELEASE_BASE:-"${FLATCAR_VERSION}" --toolchainpkgonly} \
    --usepkg_exclude="${BINARY_PACKAGES_TO_EXCLUDE}" \
    --skip_chroot_upgrade \
    --skip_torcx_store \
    --sign="${SIGNING_USER}" \
    --sign_digests="${SIGNING_USER}" \
    --upload_root="${UPLOAD_ROOT}" \
    --upload

script build_torcx_store \
    --board="${BOARD}" \
    --sign="${SIGNING_USER}" \
    --sign_digests="${SIGNING_USER}" \
    --upload_root="${UPLOAD_ROOT}" \
    --torcx_upload_root="${TORCX_PKG_DOWNLOAD_ROOT}" \
    --tectonic_torcx_download_root="${TECTONIC_TORCX_DOWNLOAD_ROOT}" \
    --upload

if [[ "${GROUP}" = "developer" ]]
then
    GROUP="${CHANNEL_BASE}"
fi

# Update the entry for the latest nightly build reference (there are no symlinks in GCS
# and it is also good to keep it deterministic)
if [[ "${FLATCAR_BUILD_ID}" == *-*-nightly-* ]]
then
    # Extract the nightly name like "flatcar-MAJOR-nightly" from "dev-flatcar-MAJOR-nightly-NUMBER"
    NAME=$(echo "${FLATCAR_BUILD_ID}" | grep -o "dev-.*-nightly" | cut -d - -f 2-)
    echo "${FLATCAR_VERSION}" | bin/cork enter --bind-gpg-agent=false -- gsutil cp - "${UPLOAD_ROOT}/boards/${BOARD}/${NAME}.txt"
fi
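
The {FETCH,RESUME}COMMAND_GS=... argument to env above is ordinary brace expansion: the shell turns it into two identical assignments, FETCHCOMMAND_GS and RESUMECOMMAND_GS, before env runs. A quick demonstration:

#!/bin/bash
printf '%s\n' env {FETCH,RESUME}COMMAND_GS="gangue get --json-key=/etc/portage/gangue.json"
# prints:
# env
# FETCHCOMMAND_GS=gangue get --json-key=/etc/portage/gangue.json
# RESUMECOMMAND_GS=gangue get --json-key=/etc/portage/gangue.json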
@ -1,53 +0,0 @@
#!/bin/bash

SHFLAGS=$(dirname $(readlink -f "$0"))/../lib/shflags/shflags
. "${SHFLAGS}" || exit 1

DEFINE_string repo "" "Name of the repository to which to post status"
DEFINE_string ref "" "Reference from which to figure out the commit"
DEFINE_string github_token "${GITHUB_TOKEN}" "GitHub Personal Access Token used to submit the commit status"
DEFINE_string status "pending" "Status to submit for the commit. [failure,pending,success,error]"
DEFINE_string context "ci/jenkins" "Context to use for the commit status."
DEFINE_boolean verbose "${FLAGS_FALSE}" "Show curl output"

# Parse command line
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"

if [ -z "${FLAGS_repo}" ]; then
    echo >&2 "Error: --repo is required"
    exit 1
fi
if [ -z "${FLAGS_ref}" ]; then
    echo >&2 "Error: --ref is required"
    exit 1
fi
if [ -z "${FLAGS_github_token}" ]; then
    echo >&2 "Error: --github_token is required"
    exit 1
fi

CURLOPTS="-sS"
if [[ "${FLAGS_verbose}" -eq "${FLAGS_TRUE}" ]]; then
    CURLOPTS=""
fi

GITHUB_API="https://api.github.com"
# BUILD_URL = JENKINS_URL + JOB_NAME + BUILD_NUMBER
target_url="${BUILD_URL}cldsv"
commit=$(git ls-remote "https://github.com/${FLAGS_repo}" "${FLAGS_ref}" | cut -f1)
if [ -z "${commit}" ]; then
    echo >&2 "Can't figure out the commit for repo ${FLAGS_repo} ref ${FLAGS_ref}"
    exit 2
fi

curl ${CURLOPTS} "${GITHUB_API}/repos/${FLAGS_repo}/statuses/${commit}" \
    -H "Content-Type: application/json" \
    -H "Authorization: token ${FLAGS_github_token}" \
    -X POST -d @- <<EOF
{
    "state":"${FLAGS_status}",
    "context": "${FLAGS_context}",
    "target_url":"${target_url}"
}
EOF
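
A typical invocation would look like the following; the values are illustrative, and GITHUB_TOKEN is assumed to be exported so --github_token can be omitted:

./post-github-status.sh \
    --repo flatcar-linux/scripts \
    --ref refs/heads/flatcar-master \
    --status success \
    --context ci/jenkins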
@ -1,23 +0,0 @@
#!/bin/bash
set -ex

rm -f ami.properties images.json

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/plume pre-release --force \
    --debug \
    --platform=aws \
    --aws-credentials="${AWS_CREDENTIALS}" \
    --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --board="${BOARD}" \
    --channel="${CHANNEL}" \
    --version="${FLATCAR_VERSION}" \
    --write-image-list=images.json \
    $verify_key

hvm_ami_id=$(jq -r '.aws.amis[]|select(.name == "'"${AWS_REGION}"'").hvm' images.json)

tee ami.properties << EOF
HVM_AMI_ID = ${hvm_ami_id:?}
EOF
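
The jq query above picks one region's HVM AMI out of the image list. Assuming images.json has roughly this shape (a guess inferred from the query, not the documented plume output format), the lookup behaves like:

#!/bin/bash
cat > images.json << 'EOF'
{"aws": {"amis": [
    {"name": "us-east-1", "hvm": "ami-0123456789abcdef0"},
    {"name": "eu-west-1", "hvm": "ami-0fedcba9876543210"}
]}}
EOF
AWS_REGION="us-east-1"
jq -r '.aws.amis[]|select(.name == "'"${AWS_REGION}"'").hvm' images.json
# -> ami-0123456789abcdef0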
@ -1,33 +0,0 @@
#!/bin/bash
set -ex

AZURE_CATEGORY_OPT=""
if [[ "${IS_NON_SPONSORED}" == true ]]
then
    AZURE_CATEGORY_OPT="--azure-category=pro"
fi

rm -f images.json

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/plume pre-release --force \
    --debug \
    --platform=azure \
    --azure-profile="${AZURE_CREDENTIALS}" \
    --azure-auth="${AZURE_AUTH_CREDENTIALS}" \
    --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --board="${BOARD}" \
    --channel="${CHANNEL}" \
    --version="${FLATCAR_VERSION}" \
    --write-image-list=images.json \
    ${AZURE_CATEGORY_OPT} \
    $verify_key

sas_url=$(jq -r '.azure.image' images.json)
if [ "${sas_url}" = "null" ]; then
    sas_url=""
fi
tee test.properties << EOF
SAS_URL ^ ${sas_url:?}
EOF
@ -1,21 +0,0 @@
#!/bin/bash
set -ex
case "${CHANNEL}" in
    *)
        boards=( amd64-usr arm64-usr )
        ;;
esac

for board in "${boards[@]}"
do
    bin/plume release \
        --debug \
        --aws-credentials="${AWS_CREDENTIALS}" \
        --azure-profile="${AZURE_CREDENTIALS}" \
        --azure-auth="${AZURE_AUTH_CREDENTIALS}" \
        --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
        --gce-release-key="${GOOGLE_RELEASE_CREDENTIALS}" \
        --board="${board}" \
        --channel="${CHANNEL}" \
        --version="${VERSION}"
done
@ -1,94 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Catalyst leaves things chowned as root.
[ -d .cache/sdks ] && sudo chown -R "$USER" .cache/sdks

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically, making further
# private key imports fail; create it here as a workaround.
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

if [[ "${SEED_SDK_VERSION}" == alpha ]]
then
    SEED_SDK_VERSION=$(curl -s -S -f -L "https://alpha.release.flatcar-linux.net/amd64-usr/current/version.txt" | grep -m 1 FLATCAR_SDK_VERSION= | cut -d = -f 2- | tee /dev/stderr)
    if [[ -z "${SEED_SDK_VERSION}" ]]
    then
        echo "Unexpected: Alpha release SDK version not found"
        exit 1
    fi
fi

DOWNLOAD_ROOT=${DOWNLOAD_ROOT:-"gs://flatcar-jenkins"}
# The seed SDK is always a release SDK
DOWNLOAD_ROOT_SDK="gs://flatcar-jenkins/sdk"

# We do not use a nightly SDK as seed for bootstrapping because the next major Alpha SDK
# release would also have to use the last published Alpha release SDK as seed.
# Also, we don't want compiler bugs to propagate from one nightly SDK to the next even
# though the commit in question was reverted.
# Having a clear bootstrap path is our last safety line before insanity for that kind of
# bug, and is a requirement for reproducibility and security.
# For more info, read Ken Thompson's Turing Award Lecture "Reflections on Trusting Trust".
# In rare cases this will mean that a huge compiler update has to be split, because first
# a released SDK with a newer compiler is needed to compile an even newer compiler
# (or linker, libc, etc.). For experiments one can download the nightly/developer SDK and
# start the bootstrap from it locally, but exposing this functionality in Jenkins would
# cause more confusion than helping to understand what the requirements are to get SDK
# changes to a releasable state.

bin/cork update \
    --create --downgrade-replace --verify --verify-signature --verbose \
    --sdk-version "${SEED_SDK_VERSION}" \
    --force-sync \
    --json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
    --sdk-url storage.googleapis.com \
    --sdk-url-path "/flatcar-jenkins/sdk" \
    --manifest-name "${MANIFEST_NAME}" \
    --manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}" --setuponly

if [[ ${FULL_BUILD} == "false" ]]; then
    export FORCE_STAGES="stage4"
fi

enter() {
    # we add the public key to verify the signature with gangue
    sudo ln -f ./verify.asc chroot/opt/verify.asc
    # GCP service account to get access to the private bucket during gangue downloads
    sudo ln -f "${GOOGLE_APPLICATION_CREDENTIALS}" chroot/etc/portage/gangue.json
    bin/cork enter --bind-gpg-agent=false -- "$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

# Wipe all of catalyst.
sudo rm -rf src/build

# Fetch the DIGESTS file to prevent re-downloading the same SDK tarball
enter /mnt/host/source/bin/gangue get --verify-key /opt/verify.asc --json-key /etc/portage/gangue.json "${DOWNLOAD_ROOT_SDK}/amd64/${FLATCAR_SDK_VERSION}/flatcar-sdk-amd64-${FLATCAR_SDK_VERSION}.tar.bz2.DIGESTS" /mnt/host/source/.cache/sdks/

enter sudo \
    FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
    FORCE_STAGES="${FORCE_STAGES}" \
    /mnt/host/source/src/scripts/bootstrap_sdk \
    --sign="${SIGNING_USER}" \
    --sign_digests="${SIGNING_USER}" \
    --upload_root="${UPLOAD_ROOT}" \
    --stage1_overlay_ref="${STAGE1_OVERLAY_REF}" \
    --stage1_portage_ref="${STAGE1_PORTAGE_REF}" \
    --upload

# Update the entry for the latest nightly build reference (there are no symlinks in GCS
# and it is also good to keep it deterministic)
if [[ "${FLATCAR_BUILD_ID}" == *-*-nightly-* ]]
then
    # Extract the nightly name like "flatcar-MAJOR-nightly" from "dev-flatcar-MAJOR-nightly-NUMBER"
    NAME=$(echo "${FLATCAR_BUILD_ID}" | grep -o "dev-.*-nightly" | cut -d - -f 2-)
    echo "${FLATCAR_VERSION}" | enter gsutil cp - "${UPLOAD_ROOT}/sdk/amd64/sdk-${NAME}.txt"
fi
@ -1,84 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Catalyst leaves things chowned as root.
[ -d .cache/sdks ] && sudo chown -R "$USER" .cache/sdks

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically, making further
# private key imports fail; create it here as a workaround.
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

DOWNLOAD_ROOT=${DOWNLOAD_ROOT:-"${UPLOAD_ROOT}"}
# Since /flatcar-jenkins/developer/sdk starts with a "/", we only use one here.
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
    SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
    OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
    PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
    --verify --verify-signature --replace \
    --sdk-url-path "${SDK_URL_PATH}" \
    --json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
    --sdk-url storage.googleapis.com \
    ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
    --manifest-name "${MANIFEST_NAME}" \
    --manifest-url "${MANIFEST_URL}"

enter() {
    sudo ln -f "${GOOGLE_APPLICATION_CREDENTIALS}" \
        chroot/etc/portage/gangue.json
    # we add the public key to verify the signature with gangue
    sudo ln -f ./verify.asc chroot/opt/verify.asc
    bin/cork enter --bind-gpg-agent=false -- env \
        FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \
        FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
        {FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
        --json-key=/etc/portage/gangue.json $verify_key \
        "'"${URI}" "${DISTDIR}/${FILE}"' \
        "$@"
}

script() {
    enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Fetch the DIGESTS file to prevent re-downloading the same SDK tarball
enter /mnt/host/source/bin/gangue get --verify-key /opt/verify.asc --json-key /etc/portage/gangue.json "${DOWNLOAD_ROOT_SDK}/amd64/${FLATCAR_SDK_VERSION}/flatcar-sdk-amd64-${FLATCAR_SDK_VERSION}.tar.bz2.DIGESTS" /mnt/host/source/.cache/sdks/

script update_chroot \
    --toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}" --setuponly

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

# Wipe all of catalyst.
sudo rm -rf src/build

enter sudo FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" /mnt/host/source/src/scripts/build_toolchains \
    --sign="${SIGNING_USER}" \
    --sign_digests="${SIGNING_USER}" \
    --upload_root="${UPLOAD_ROOT}" \
    --upload
jenkins/vms.sh
@ -1,126 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically, making further
# private key imports fail; create it here as a workaround.
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# Since /flatcar-jenkins/developer/sdk starts with a "/", we only use one here.
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
    SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
    OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
    PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
    --replace --verify --verify-signature --verbose \
    --sdk-url-path "${SDK_URL_PATH}" \
    --json-key "${GS_DEVEL_CREDS}" \
    ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
    --manifest-name "${MANIFEST_NAME}" \
    --manifest-url "${MANIFEST_URL}" \
    --sdk-url=storage.googleapis.com

# Clear out old images.
sudo rm -rf chroot/build tmp

enter() {
    local verify_key=
    # Run in a subshell to clean some gangue files on exit without
    # possibly clobbering the global EXIT trap.
    (
        trap 'sudo rm -f chroot/etc/portage/gangue.*' EXIT
        [ -s verify.asc ] &&
        sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
        verify_key=--verify-key=/etc/portage/gangue.asc
        sudo ln -f "${GS_DEVEL_CREDS}" chroot/etc/portage/gangue.json
        bin/cork enter --bind-gpg-agent=false -- env \
            FLATCAR_DEV_BUILDS="${GS_DEVEL_ROOT}" \
            FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
            {FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
            --json-key=/etc/portage/gangue.json $verify_key \
            "'"${URI}" "${DISTDIR}/${FILE}"' \
            "$@"
    )
}

script() {
    enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

script update_chroot \
    --toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

mkdir -p src tmp
bin/cork download-image \
    --root="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --cache-dir=./src \
    --platform=qemu \
    --verify=true $verify_key

img=src/flatcar_production_image.bin
# Decompress only when the downloaded .bz2 is newer than any existing unpacked image.
[[ "${img}.bz2" -nt "${img}" ]] &&
    enter lbunzip2 -k -f "/mnt/host/source/${img}.bz2"

if [[ "${FORMATS}" = "" ]]
then
    FORMATS="${FORMAT}"
fi

if [[ "${FORMATS}" == *"azure_gen2"* ]] ; then
    # azure_gen2 shares an image with azure
    if [[ " ${FORMATS} " != *" azure "* ]]; then
        FORMATS+=" azure"
    fi
    FORMATS=${FORMATS/azure_gen2/}
fi

for FORMAT in ${FORMATS}; do
    COMPRESSION_FORMAT="bz2"

    if [[ "${FORMAT}" =~ ^(openstack|openstack_mini|digitalocean)$ ]]; then
        COMPRESSION_FORMAT="gz,bz2"
    fi

    script image_to_vm.sh \
        --board="${BOARD}" \
        --format="${FORMAT}" \
        --getbinpkg \
        --getbinpkgver="${FLATCAR_VERSION}" \
        --from=/mnt/host/source/src \
        --to=/mnt/host/source/tmp \
        --sign="${SIGNING_USER}" \
        --sign_digests="${SIGNING_USER}" \
        --download_root="${DOWNLOAD_ROOT}" \
        --upload_root="${UPLOAD_ROOT}" \
        --image_compression_formats="${COMPRESSION_FORMAT}" \
        --upload
done
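
The azure_gen2 handling above is plain string surgery on the space-separated FORMATS list; a standalone sketch:

#!/bin/bash
FORMATS="qemu azure_gen2"
if [[ "${FORMATS}" == *"azure_gen2"* ]] ; then
    # azure_gen2 shares an image with azure, so make sure azure is built...
    if [[ " ${FORMATS} " != *" azure "* ]]; then
        FORMATS+=" azure"
    fi
    # ...and drop the alias itself from the list.
    FORMATS=${FORMATS/azure_gen2/}
fi
echo "${FORMATS}"   # -> "qemu  azure" (the doubled space is harmless to the for loop)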
@ -29,8 +29,6 @@ if [[ -z "${FLAGS_board}" ]]; then
    exit 1
fi

check_gsutil_opts

# set BOARD and BOARD_ROOT
. "${BUILD_LIBRARY_DIR}/toolchain_util.sh" || exit 1
. "${BUILD_LIBRARY_DIR}/board_options.sh" || exit 1
@ -78,9 +76,6 @@ else

info "Checking build root"
test_image_content "${BOARD_ROOT}"

# upload packages if enabled
upload_packages
fi

command_completed
@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAzFS5uVJ+pgibcFLD3kbYk02Edj0HXq31ZT/Bva1sLp3Ysv+Q
Tv/ezjf0gGFfASdgpz6G+zTipS9AIrQr0yFR+tdp1ZsHLGxVwvUoXFftdapqlyj8
uQcWjjbN7qJsZu0Ett/qo93hQ5nHW7Sv5dRm/ZsDFqk2Uvyaoef4bF9r03wYpZq7
K3oALZ2smETv+A5600mj1Xg5M52QFU67UHlsEFkZphrGjiqiCdp9AAbAvE7a5rFc
Jf86YR73QX08K8BX7OMzkn3DsqdnWvLB3l3W6kvIuP+75SrMNeYAcU8PI1+bzLcA
G3VN3jA78zeKALgynUNH50mxuiiU3DO4DZ+p5QIDAQABAoIBAH7ENbE+9+nkPyMx
hekaBPVmSz7b3/2iaTNWmckmlY5aSX3LxejtH3rLBjq7rihWGMXJqg6hodcfeGfP
Zb0H2AeKq1Nlac7qq05XsKGRv3WXs6dyO1BDkH/Minh5dk1o0NrwEm91kXLSLfe8
IsCwxPCjwgfGFTjpFLpL4zjA/nFmWRyk2eyvs5VYRGKbbC83alUy7LutyRdZfw1b
nwXldw2m8k/HPbGhaAqPpXTOjckIXZS5Dcp3smrOzwObZ6c3gQzg8upaRmxJVOmk
cgCFTe0yUB2GMTEE3SUmuWJyZqECoyQtuiu0yT3igH8MZQpjg9NXm0eho/bXjN36
frH+ikUCgYEA7VdCRcisnYWct29j+Bnaio9yXwwxhfoee53a4LQgjw5RLGUe1mXe
j56oZ1Mak3Hh55sVQLNXZBuXHQqPsr7KkWXJXedDNFfq1u6by4LeJV0YYiDjjaCM
T5G4Tcs7xhBWszLMCjhpJCrwHdGk3aa65UQ+angZlxhyziULCjpb5rMCgYEA3GUb
VkqlVuNkHoogOMwg+h1jUSkwtWvP/z/FOXrKjivuwSgQ+i6PsildI3FL/WQtJxgd
arB+l0L8TZJ6spFdNXwGmdCLqEcgEBYl11EojOXYLa7oLONI41iRQ3/nBBIqC38P
Cs6CZQG/ZpKSoOzXE34BwcrOL99MA2oaVpGHuQcCgYA1IIk3Mbph8FyqOwb3rGHd
Dksdt48GXHyiUy2BixCWtS+6blA+0cLGB0/PAS07wAw/WdmiCAMR55Ml7w1Hh6m0
bkJrAK9schmhTvwUzBCJ8JLatF37f+qojQfichHJPjMKHd7KkuIGNI5XPmxXKVFA
rMwD7SpdRh28w1H7UiDsPQKBgGebnFtXohyTr2hv9K/evo32LM9ltsFC2rga6YOZ
BwoI+yeQx1JleyX9LgzQYTHQ2y0quAGE0S4YznVFLCswDQpssMm0cUL9lMQbNVTg
kViTYKoxNHKNsqE17Kw3v4l5ZIydAZxJ8qC7TphQxV+jl4RRU1AgIAf/SEO+qH0T
0yMXAoGBAN+y9QpGnGX6cgwLQQ7IC6MC+3NRed21s+KxHzpyF+Zh/q6NTLUSgp8H
dBmeF4wAZTY+g/fdB9drYeaSdRs3SZsM7gMEvjspjYgE2rV/5gkncFyGKRAiNOR4
bsy1Gm/UYLTc8+S3fq/xjg9RCjW9JMwavAwL6oVNNt7nyAXPfvSu
-----END RSA PRIVATE KEY-----
@ -1,9 +0,0 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzFS5uVJ+pgibcFLD3kbY
k02Edj0HXq31ZT/Bva1sLp3Ysv+QTv/ezjf0gGFfASdgpz6G+zTipS9AIrQr0yFR
+tdp1ZsHLGxVwvUoXFftdapqlyj8uQcWjjbN7qJsZu0Ett/qo93hQ5nHW7Sv5dRm
/ZsDFqk2Uvyaoef4bF9r03wYpZq7K3oALZ2smETv+A5600mj1Xg5M52QFU67UHls
EFkZphrGjiqiCdp9AAbAvE7a5rFcJf86YR73QX08K8BX7OMzkn3DsqdnWvLB3l3W
6kvIuP+75SrMNeYAcU8PI1+bzLcAG3VN3jA78zeKALgynUNH50mxuiiU3DO4DZ+p
5QIDAQAB
-----END PUBLIC KEY-----
@ -1,6 +0,0 @@
#!/usr/bin/env bash

set -ex
KEY="$1"
openssl genrsa -rand /dev/random -out "${KEY}.key.pem" 2048
openssl rsa -in "${KEY}.key.pem" -pubout -out "${KEY}.pub.pem"
@ -1,8 +0,0 @@
#!/usr/bin/env bash
# If there is no default printer use ./print_key.sh prod-2 -d printer_name
# List available printers with lpstat -a

set -ex
KEY="$1"
shift
qrencode -8 -o - < "${KEY}.key.pem" | lp -E -o fit-to-page "$@"
@ -1,9 +0,0 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7pO21yN+b6yx9P+wHzS2
clxGs18eWxfoleETLbFVmcXu783rgBP0bFjYfqrNZNaI1Ip6bxEYkPacg0xFg7ri
lNdO/cxJV5Ltj40wFlpmzJOAH8hx5SF8KWg2NV1I6TS8pp+CQqcvvOKu6AIcWfeY
11V7eJ8rWcDsnqpTg8T1VRxytsg2UjTMfQwzcGLTb8cQ8AV39ED5WC5NdS9Bld4h
XqS9Dx6Pe3JOQLZze6XIIwWuB2jxGpM1GWfRNm5nxvne3l7ggC970482a7STGK10
fD8//k8myVxleMAeQoMRXoRq9p3C84H4Bw8v2dX13kFFCgfEQj6SOZ5huXZKLPpB
LwIDAQAB
-----END PUBLIC KEY-----
@ -1,251 +0,0 @@
#!/usr/bin/env bash

set -eux

APPID=e96281a6-d1af-4bde-9a0a-97b76e56dc57

declare -A RELEASE_CHANNEL
RELEASE_CHANNEL[alpha]=Alpha
RELEASE_CHANNEL[beta]=Beta
RELEASE_CHANNEL[stable]=Stable

download() {
    local channel="$1"
    local version="$2"
    local board="$3"

    local gs="gs://builds.release.core-os.net/${channel}/boards/${board}/${version}"
    local dir="${BASEDIR}/${board}/${version}"
    mkdir -p "${dir}"
    pushd "${dir}" >/dev/null

    gsutil -m cp \
        "${gs}/flatcar_production_image.vmlinuz" \
        "${gs}/flatcar_production_image.vmlinuz.sig" \
        "${gs}/flatcar_production_update.bin.bz2" \
        "${gs}/flatcar_production_update.bin.bz2.sig" \
        "${gs}/flatcar_production_update.zip" \
        "${gs}/flatcar_production_update.zip.sig" ./

    # torcx manifest: try the embargoed release bucket first
    local torcx_base="gs://builds.release.core-os.net/embargoed/devfiles/torcx/manifests/${board}/${version}"
    if ! gsutil -q stat "${torcx_base}/torcx_manifest.json"; then
        # Non-embargoed release
        local torcx_base="gs://builds.developer.core-os.net/torcx/manifests/${board}/${version}"
    fi
    gsutil -m cp \
        "${torcx_base}/torcx_manifest.json" \
        "${torcx_base}/torcx_manifest.json.sig" \
        ./

    gpg2 --verify "flatcar_production_image.vmlinuz.sig"
    gpg2 --verify "flatcar_production_update.bin.bz2.sig"
    gpg2 --verify "flatcar_production_update.zip.sig"
    gpg2 --verify "torcx_manifest.json.sig"

    popd >/dev/null
}

devsign() {
    local channel="$1"
    local version="$2"
    local board="$3"

    "$(dirname $0)/../core_dev_sign_update" \
        --data_dir "${BASEDIR}" \
        --version "${version}" \
        --output_dir "${SIGDIR}"

    git -C "${SIGDIR}" add .
    git -C "${SIGDIR}" commit -m "Add sigs from ${USER} for ${channel} ${version}"
}

upload() {
    local channel="$1"
    local version="$2"
    local board="$3"

    local dir="${BASEDIR}/${board}/${version}"
    local payload="${dir}/flatcar_production_update.gz"
    local torcx_manifest="${dir}/torcx_manifest.json"
    local torcx_manifest_sig="${dir}/torcx_manifest.json.asc"
    local path
    for path in "${payload}" "${torcx_manifest}" "${torcx_manifest_sig}"; do
        if [[ ! -e "${path}" ]]; then
            echo "No such file: ${path}" >&2
            exit 1
        fi
    done

    "$(dirname $0)/../core_roller_upload" \
        --user="${ROLLER_USERNAME}" \
        --api_key="${ROLLER_API_KEY}" \
        --app_id="${APPID}" \
        --board="${board}" \
        --version="${version}" \
        --payload="${payload}"

    # Upload torcx manifests
    gsutil cp \
        "${torcx_manifest}" \
        "${torcx_manifest_sig}" \
        "gs://coreos-tectonic-torcx/manifests/${board}/${version}/"

    # Update the version in a canary channel if one is defined.
    local -n canary_channel="ROLLER_CANARY_CHANNEL_${channel^^}"
    if [[ -n "${canary_channel}" ]]; then
        updateservicectl \
            --server="https://public.update.core-os.net" \
            --user="${ROLLER_USERNAME}" \
            --key="${ROLLER_API_KEY}" \
            channel update \
            --app-id="${APPID}" \
            --channel="${canary_channel}" \
            --version="${version}"
    fi
}

ready() {
    local channel="$1"
    local version="$2"
    local board="$3"

    # Setting the percent will deactivate (not delete) any existing rollouts for
    # this specific group.
    echo "Rollout set to 0% for ${board}"
    updateservicectl \
        --server="https://public.update.core-os.net" \
        --user="${ROLLER_USERNAME}" \
        --key="${ROLLER_API_KEY}" \
        group percent \
        --app-id="${APPID}" \
        --group-id="${channel}" \
        --update-percent=0

    # FIXME(bgilbert): We set --publish=true because there's no way to
    # say --publish=unchanged
    updateservicectl \
        --server="https://public.update.core-os.net" \
        --user="${ROLLER_USERNAME}" \
        --key="${ROLLER_API_KEY}" \
        channel update \
        --app-id="${APPID}" \
        --channel="${RELEASE_CHANNEL[${channel}]}" \
        --publish=true \
        --version="${version}"
}

roll() {
    local channel="$1"
    local hours="$2"
    local board="$3"

    local seconds=$((${hours} * 3600))

    # Only ramp rollouts on AMD64; ARM64 is too small
    if [[ "$board" = "arm64-usr" ]]; then
        echo "Setting rollout for arm64-usr to 100%"
        updateservicectl \
            --server="https://public.update.core-os.net" \
            --user="${ROLLER_USERNAME}" \
            --key="${ROLLER_API_KEY}" \
            group percent \
            --app-id="${APPID}" \
            --group-id="${channel}" \
            --update-percent=100
    else
        # Creating a new rollout deletes any existing rollout for this group and
        # automatically activates the new one.
        echo "Creating linear rollout for ${board} that will get to 100% in ${hours}h"
        updateservicectl \
            --server="https://public.update.core-os.net" \
            --user="${ROLLER_USERNAME}" \
            --key="${ROLLER_API_KEY}" \
            rollout create linear \
            --app-id="${APPID}" \
            --group-id="${channel}" \
            --duration="${seconds}" \
            --frame-size="60"
    fi
}

usage() {
    echo "Usage: $0 {download|upload} <ARTIFACT-DIR> [{-a|-b|-s} <VERSION>]..." >&2
    echo "Usage: $0 devsign <ARTIFACT-DIR> <SIG-DIR> [{-a|-b|-s} <VERSION> <BOARD>]..." >&2
    echo "Usage: $0 ready [{-a|-b|-s} <VERSION>]..." >&2
    echo "Usage: $0 roll [{-a|-b|-s} <HOURS-TO-100-PERCENT>]..." >&2
    exit 1
}

# Parse subcommand.
CMD="${1:-}"
shift ||:
case "${CMD}" in
    download|devsign)
        ;;
    upload|ready|roll)
        if [[ -e "${HOME}/.config/roller.conf" ]]; then
            . "${HOME}/.config/roller.conf"
        fi
        if [[ -z "${ROLLER_USERNAME:-}" || -z "${ROLLER_API_KEY:-}" ]]; then
            echo 'Missing $ROLLER_USERNAME or $ROLLER_API_KEY.' >&2
            echo "Consider adding shell assignments to ~/.config/roller.conf." >&2
            exit 1
        fi
        ;;
    *)
        usage
        ;;
esac

# Parse fixed args if necessary.
case "${CMD}" in
    download|devsign|upload)
        BASEDIR="${1:-}"
        shift ||:
        if [[ -z "${BASEDIR}" ]]; then
            usage
        fi
        ;;
esac
case "${CMD}" in
    devsign)
        SIGDIR="${1:-}"
        shift ||:
        if [[ -z "${SIGDIR}" ]]; then
            usage
        fi
        ;;
esac

# Sync SIGDIR exactly once.
case "${CMD}" in
    devsign)
        echo "Updating ${SIGDIR}..."
        git -C "${SIGDIR}" pull -r
        ;;
esac

# Walk argument pairs.
while [[ $# -gt 0 ]]; do
    c="$1"
    v="${2?Must provide an argument for each channel (see usage)}"
    shift 2

    case "${c}" in
        -a)
            $CMD "alpha" "${v}" "amd64-usr"
            $CMD "alpha" "${v}" "arm64-usr"
            ;;
        -b)
            $CMD "beta" "${v}" "amd64-usr"
            $CMD "beta" "${v}" "arm64-usr"
            ;;
        -s)
            $CMD "stable" "${v}" "amd64-usr"
            ;;
        *)
            usage
            ;;
    esac
done
@ -8,17 +8,8 @@ SCRIPT_ROOT=$(dirname $(readlink -f "$0"))
. "${SCRIPT_ROOT}/common.sh" || exit 1

DEFINE_boolean dry_run ${FLAGS_FALSE} "Trial run, makes no changes."
DEFINE_boolean parallel ${FLAGS_TRUE} "Enable parallelism in gsutil."
DEFINE_boolean upload ${FLAGS_FALSE} "Upload distfile mirror via gsutil."

# FIXME(marineam): We need to add gs support to emirrordist so it
# doesn't have to operate on a local copy of the complete mirror.
DEFINE_boolean download ${FLAGS_FALSE} \
    "Download the current mirror before making updates to it."

MIRROR_ROOT="${DEFAULT_BUILD_ROOT}/mirror"
UPLOAD_ROOT="gs://storage.core-os.net/mirror"
SECOND_ROOT="gs://coreos-net-storage/mirror"

# Parse flags
FLAGS "$@" || exit 1
@ -34,11 +25,6 @@ if [[ $# -eq 0 ]]; then
    set -- "${!repos[@]}"
fi

GSUTIL_OPTS=
if [[ ${FLAGS_parallel} -eq ${FLAGS_TRUE} ]]; then
    GSUTIL_OPTS="-m"
fi

EXIT_CODE=0

@ -87,40 +73,6 @@ update_local_mirror() {

    EXIT_CODE=1
}
upload_mirror() {
    local repo_name="$1"
    local local_mirror="${MIRROR_ROOT}/${repos[$repo_name]}"
    local remote_mirror="$2/${repos[$repo_name]}"

    info "Uploading public distfiles for $repo_name"
    gsutil ${GSUTIL_OPTS} rsync -c \
        "${local_mirror}/distfiles/" "${remote_mirror}/distfiles"

    info "Uploading private metadata for $repo_name"
    # uses cp instead of rsync in order to provide an ACL
    gsutil ${GSUTIL_OPTS} cp -a project-private \
        "${local_mirror}/info/*" "${remote_mirror}/info"
}
download_mirror() {
    local repo_name="$1"
    local local_mirror="${MIRROR_ROOT}/${repos[$repo_name]}"
    local remote_mirror="${UPLOAD_ROOT}/${repos[$repo_name]}"

    info "Downloading public distfiles for $repo_name"
    mkdir -p "${local_mirror}/"{distfiles,info}
    gsutil ${GSUTIL_OPTS} rsync -c -d \
        "${remote_mirror}/distfiles/" "${local_mirror}/distfiles"

    info "Downloading private metadata for $repo_name"
    gsutil ${GSUTIL_OPTS} rsync -c -d \
        "${remote_mirror}/info/" "${local_mirror}/info"
}

if [[ ${FLAGS_download} -eq ${FLAGS_TRUE} ]]; then
    for repo in "$@"; do
        download_mirror "$repo"
    done
fi

for repo in "$@"; do
    if ! portageq get_repo_path / "$repo" >/dev/null; then
@ -134,12 +86,5 @@ if [[ ${FLAGS_dry_run} == ${FLAGS_TRUE} ]]; then
    exit
fi

if [[ ${FLAGS_upload} -eq ${FLAGS_TRUE} ]]; then
    for repo in "$@"; do
        upload_mirror "$repo" "$UPLOAD_ROOT"
        upload_mirror "$repo" "$SECOND_ROOT"
    done
fi

command_completed
exit $EXIT_CODE