jenkins: move all inline bash scripts to flatcar-scripts
The logic of the inline bash scripts of each job was sometimes split out into the flatcar-scripts/jenkins/*.sh helpers but was mostly kept in the Groovy files. This coupling had its advantages but also downsides when special cases had to be added for different release versions. Other issues were that backslashes in the inline scripts had to be escaped twice and that Jenkins was not good at terminating child processes when stopping a job. Having the bash scripts inline in Groovy also mandated the use of Jenkins to build and release Flatcar Container Linux, which hinders test builds on other CI platforms.

Move the inline bash scripts fully to the files in flatcar-scripts/jenkins/ and create new ones for jobs that did not have a script there yet. Also invoke them through a systemd-run wrapper script, which ensures that all child processes are terminated and sets up /opt/bin as an additional path for the static lbzcat binary. A workaround was needed for bash 4: a temporary file is used instead of the <(cmd) process substitution, which caused a strange syntax error; otherwise the bash commands are moved as they are.
Commit 8eaef708be (parent 0ec82836ad)
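
To illustrate the pattern the commit message describes, here is a minimal, hypothetical sketch of a Jenkins shell step after the move; the paths and the BOARD value are assumptions for illustration and are not taken from the Groovy job definitions:

    # Hypothetical job step: parameters arrive as exported environment variables,
    # and the job only calls the checked-in script through the systemd-run wrapper.
    export BOARD="amd64-usr"   # illustrative value; normally provided by Jenkins
    ./jenkins/systemd-run-wrap.sh ./jenkins/images.sh
    # The wrapper runs the script in a transient systemd unit, so aborting the job
    # stops the whole process tree, and /opt/bin is added to PATH for lbzcat.
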
jenkins/images.sh — modified, Normal file → Executable file (47 changed lines)

@@ -1,4 +1,42 @@
-#!/bin/bash -ex
+#!/bin/bash
+set -ex
+
+# The build may not be started without a tag value.
+[ -n "${MANIFEST_TAG}" ]
+
+# Set up GPG for verifying tags.
+export GNUPGHOME="${PWD}/.gnupg"
+rm -rf "${GNUPGHOME}"
+trap 'rm -rf "${GNUPGHOME}"' EXIT
+mkdir --mode=0700 "${GNUPGHOME}"
+gpg --import verify.asc
+# Sometimes this directory is not created automatically making further private
+# key imports fail, let's create it here as a workaround
+mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"
+
+DOWNLOAD_ROOT_SDK="https://storage.googleapis.com${SDK_URL_PATH}"
+
+SCRIPTS_PATCH_ARG=""
+OVERLAY_PATCH_ARG=""
+PORTAGE_PATCH_ARG=""
+if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
+    SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
+fi
+if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
+    OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
+fi
+if [ "$(cat portage.patch | wc -l)" != 0 ]; then
+    PORTAGE_PATCH_ARG="--portage-patch portage.patch"
+fi
+
+bin/cork update \
+    --create --downgrade-replace --verify --verify-signature --verbose \
+    --sdk-url-path "${SDK_URL_PATH}" \
+    --force-sync \
+    ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
+    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
+    --manifest-name "${MANIFEST_NAME}" \
+    --manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
 
 # Clear out old images.
 sudo rm -rf chroot/build src/build torcx

@@ -48,6 +86,10 @@ enter gsutil cp -r \
     /mnt/host/source/torcx/
 gpg --verify torcx/torcx_manifest.json.sig
 
+BASH_SYNTAX_ERROR_WORKAROUND=$(mktemp)
+exec {keep_open}<>"${BASH_SYNTAX_ERROR_WORKAROUND}"
+rm "${BASH_SYNTAX_ERROR_WORKAROUND}"
+jq -r '.value.packages[] | . as $p | .name as $n | $p.versions[] | [.casDigest, .hash] | join(" ") | [$n, .] | join(" ")' "torcx/torcx_manifest.json" > "/proc/$$/fd/${keep_open}"
 # Download all cas references from the manifest and verify their checksums
 # TODO: technically we can skip ones that don't have a 'path' since they're not
 # included in the image.

@@ -62,7 +104,8 @@ do
         echo "Torcx package had wrong hash: ${downloaded_hash} instead of ${hash}"
         exit 1
     fi
-done < <(jq -r '.value.packages[] | . as $p | .name as $n | $p.versions[] | [.casDigest, .hash] | join(" ") | [$n, .] | join(" ")' "torcx/torcx_manifest.json")
+done < "/proc/$$/fd/${keep_open}"
+# This was "done < <(jq ...)" but it suddenly gave a syntax error with bash 4 when run with systemd-run-wrap.sh
 
 script build_image \
     --board="${BOARD}" \

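The temporary-file dance added above replaces a process substitution that started to fail under bash 4 when run through systemd-run-wrap.sh. Here is a minimal standalone sketch of the same pattern, with a hypothetical producer_cmd standing in for the jq call:

    # Hypothetical illustration of the workaround pattern (producer_cmd is a placeholder):
    TMP=$(mktemp)
    exec {fd}<>"${TMP}"          # keep the file open on a dynamically assigned fd
    rm "${TMP}"                  # unlink it; the open fd keeps the data reachable
    producer_cmd > "/proc/$$/fd/${fd}"
    while read -r line; do
        echo "${line}"
    done < "/proc/$$/fd/${fd}"   # read back instead of: done < <(producer_cmd)
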
jenkins/kola/aws.sh — new executable file (63 lines)

#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

NAME="jenkins-${JOB_NAME##*/}-${BUILD_NUMBER}"

if [[ "${AWS_INSTANCE_TYPE}" != "" ]]; then
    instance_type="${AWS_INSTANCE_TYPE}"
elif [[ "${BOARD}" == "arm64-usr" ]]; then
    instance_type="a1.medium"
elif [[ "${BOARD}" == "amd64-usr" ]]; then
    instance_type="t3.small"
fi

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
    OFFER="basic"
fi

# Append the offer as oem suffix.
if [[ "${OFFER}" != "basic" ]]; then
    OEM_SUFFIX="_${OFFER}"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

if [[ "${AWS_AMI_ID}" == "" ]]; then
    [ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=
    mkdir -p tmp
    bin/cork download-image \
        --cache-dir=tmp \
        --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
        --platform="aws${OEM_SUFFIX}" \
        --root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
        --sanity-check=false --verify=true $verify_key
    bunzip2 "tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk.bz2"
    BUCKET="flatcar-kola-ami-import-${AWS_REGION}"
    trap 'bin/ore -d aws delete --region="${AWS_REGION}" --name="${NAME}" --ami-name="${NAME}" --file="tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk" --bucket "s3://${BUCKET}/${BOARD}/"; rm -r tmp/' EXIT
    bin/ore aws initialize --region="${AWS_REGION}" --bucket "${BUCKET}"
    AWS_AMI_ID=$(bin/ore aws upload --force --region="${AWS_REGION}" --name=${NAME} --ami-name="${NAME}" --ami-description="Flatcar Test ${NAME}" --file="tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk" --bucket "s3://${BUCKET}/${BOARD}/" | jq -r .HVM)
    echo "Created new AMI ${AWS_AMI_ID} (will be removed after testing)"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 6h bin/kola run \
    --parallel=8 \
    --basename="${NAME}" \
    --board="${BOARD}" \
    --aws-ami="${AWS_AMI_ID}" \
    --aws-region="${AWS_REGION}" \
    --aws-type="${instance_type}" \
    --aws-iam-profile="${AWS_IAM_PROFILE}" \
    --platform=aws \
    --channel="${GROUP}" \
    --offering="${OFFER}" \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

jenkins/kola/azure.sh — new executable file (47 lines)

#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

NAME="jenkins-${JOB_NAME##*/}-${BUILD_NUMBER}"

if [[ "${BOARD}" == "arm64-usr" ]]; then
    echo "Unsupported board"
    exit 1
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

if [[ "${AZURE_MACHINE_SIZE}" != "" ]]; then
    AZURE_MACHINE_SIZE_OPT="--azure-size=${AZURE_MACHINE_SIZE}"
fi

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
    OFFER="basic"
fi

if [ "${BLOB_URL}" = "" ]; then
    exit 0
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 20h bin/kola run \
    --parallel="${PARALLEL}" \
    --basename="${NAME}" \
    --board="${BOARD}" \
    --channel="${GROUP}" \
    --platform=azure \
    --offering="${OFFER}" \
    --azure-blob-url="${BLOB_URL}" \
    --azure-location="${LOCATION}" \
    --azure-profile="${AZURE_CREDENTIALS}" \
    --azure-auth="${AZURE_AUTH_CREDENTIALS}" \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${AZURE_MACHINE_SIZE_OPT} \
    ${KOLA_TESTS}
set +o noglob

jenkins/kola/dev-container.sh — new executable file (46 lines)

#!/bin/bash
set -ex

sudo rm -f flatcar_developer_container.bin*
trap 'sudo rm -f flatcar_developer_container.bin*' EXIT

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/gangue get \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --verify=true $verify_key \
    "${DOWNLOAD_ROOT}/boards/${BOARD}/${VERSION}/flatcar_production_image_kernel_config.txt"

bin/gangue get \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --verify=true $verify_key \
    "${DOWNLOAD_ROOT}/boards/${BOARD}/${VERSION}/flatcar_developer_container.bin.bz2"
bunzip2 flatcar_developer_container.bin.bz2

if [[ "$(systemd-nspawn --version | grep 'systemd 241')" = "" ]]
then
    PIPEARG="--pipe"
else
    # TODO: Remove this case once Flatcar >=2592 is used on all nodes
    PIPEARG=""
fi

sudo systemd-nspawn $PIPEARG \
    --bind-ro=/lib/modules \
    --bind-ro="$PWD/flatcar_production_image_kernel_config.txt:/boot/config" \
    --image=flatcar_developer_container.bin \
    --machine=flatcar-developer-container-$(uuidgen) \
    --tmpfs=/usr/src \
    --tmpfs=/var/tmp \
    /bin/bash -eux << 'EOF'
emerge-gitclone
. /usr/share/coreos/release
if [[ $FLATCAR_RELEASE_VERSION =~ master ]]
then
    git -C /var/lib/portage/portage-stable checkout master
    git -C /var/lib/portage/coreos-overlay checkout master
fi
emerge -gv coreos-sources
ln -fns /boot/config /usr/src/linux/.config
exec make -C /usr/src/linux -j"$(nproc)" modules_prepare V=1
EOF

jenkins/kola/do.sh — new executable file (47 lines)

#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

set -o pipefail

if [[ "${DOWNLOAD_ROOT}" == gs://flatcar-jenkins-private/* ]]; then
    echo "Fetching google/cloud-sdk"
    docker pull google/cloud-sdk > /dev/null
    BUCKET_PATH="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}/flatcar_production_digitalocean_image.bin.bz2"
    IMAGE_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH} | grep -o 'https.*'")"
else
    BASE_URL="https://storage.googleapis.com/$(echo $DOWNLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
    IMAGE_URL="${BASE_URL}/flatcar_production_digitalocean_image.bin.bz2"
fi

bin/ore do create-image \
    --config-file="${DIGITALOCEAN_CREDS}" \
    --region="${DO_REGION}" \
    --name="${NAME}" \
    --url="${IMAGE_URL}"

trap 'bin/ore do delete-image \
    --name="${NAME}" \
    --config-file="${DIGITALOCEAN_CREDS}"' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 4h bin/kola run \
    --do-size=${DO_MACHINE_SIZE} \
    --do-region=${DO_REGION} \
    --basename="${NAME}" \
    --do-config-file="${DIGITALOCEAN_CREDS}" \
    --do-image="${NAME}" \
    --parallel=8 \
    --platform=do \
    --channel="${GROUP}" \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

jenkins/kola/gce.sh — new executable file (52 lines)

#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
    OFFER="basic"
fi

# Append the offer as oem suffix.
if [[ "${OFFER}" != "basic" ]]; then
    OEM_SUFFIX="_${OFFER}"
fi

# Create a name that includes the OEM_SUFFIX,
# but replace _ with -, as gcloud doesn't like it otherwise.
OEMNAME="jenkins-${JOB_NAME##*/}${OEM_SUFFIX}-${BUILD_NUMBER}"
NAME=${OEMNAME//_/-}

bin/ore gcloud create-image \
    --board="${BOARD}" \
    --family="${NAME}" \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --source-root="${DOWNLOAD_ROOT}/boards" \
    --source-name=flatcar_production_gce${OEM_SUFFIX}.tar.gz \
    --version="${FLATCAR_VERSION}"

GCE_NAME="${NAME//[+.]/-}-${FLATCAR_VERSION//[+.]/-}"

trap 'bin/ore gcloud delete-images \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    "${GCE_NAME}"' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 6h bin/kola run \
    --basename="${NAME}" \
    --gce-image="${GCE_NAME}" \
    --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --gce-machinetype="${GCE_MACHINE_TYPE}" \
    --parallel=4 \
    --platform=gce \
    --channel="${GROUP}" \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

jenkins/kola/packet.sh — new executable file (54 lines)

#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

timeout=8h

set -o pipefail

# Construct the URLs of the image to be used during tests.
# KERNEL/CPIO_URL will be used by iPXE and so it will use http instead of https to
# make the boot process faster (except for signed URLs).
# IMAGE_URL is downloaded through Flatcar and can do SSL just fine, so that one
# can use https:// without a significant delay
if [[ "${DOWNLOAD_ROOT}" == gs://flatcar-jenkins-private/* ]]; then
    echo "Fetching google/cloud-sdk"
    docker pull google/cloud-sdk > /dev/null
    BUCKET_PATH="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
    IMAGE_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_packet_image.bin.bz2 | grep -o 'https.*'")"
    KERNEL_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_pxe.vmlinuz | grep -o 'https.*'")"
    CPIO_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_pxe_image.cpio.gz | grep -o 'https.*'")"
else
    BASE_PATH="storage.googleapis.com/$(echo $DOWNLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
    IMAGE_URL="https://${BASE_PATH}/flatcar_production_packet_image.bin.bz2"
    KERNEL_URL="http://${BASE_PATH}/flatcar_production_pxe.vmlinuz"
    CPIO_URL="http://${BASE_PATH}/flatcar_production_pxe_image.cpio.gz"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT "${timeout}" bin/kola run \
    --basename="${NAME}" \
    --board="${BOARD}" \
    --channel="${GROUP}" \
    --gce-json-key="${UPLOAD_CREDS}" \
    --packet-api-key="${PACKET_API_KEY}" \
    --packet-facility="${PACKET_REGION}" \
    --packet-image-url="${IMAGE_URL}" \
    --packet-installer-image-kernel-url="${KERNEL_URL}" \
    --packet-installer-image-cpio-url="${CPIO_URL}" \
    --packet-project="${PACKET_PROJECT}" \
    --packet-storage-url="${UPLOAD_ROOT}/mantle/packet" \
    --packet-plan="${PACKET_MACHINE_TYPE}" \
    --parallel="${PARALLEL_TESTS}" \
    --platform=packet \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

jenkins/kola/qemu.sh — new executable file (67 lines)

#!/bin/bash
set -ex

sudo rm -rf *.tap src/scripts/_kola_temp tmp _kola_temp*

enter() {
    bin/cork enter --bind-gpg-agent=false -- "$@"
}

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

DOWNLOAD_ROOT_SDK="https://storage.googleapis.com${SDK_URL_PATH}"

bin/cork update \
    --create --downgrade-replace --verify --verify-signature --verbose \
    --sdk-url-path "${SDK_URL_PATH}" \
    --force-sync \
    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
    --manifest-name "${MANIFEST_NAME}" \
    --manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
source .repo/manifests/version.txt

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

mkdir -p tmp
bin/cork download-image \
    --cache-dir=tmp \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --platform=qemu \
    --root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
    --verify=true $verify_key
enter lbunzip2 -k -f /mnt/host/source/tmp/flatcar_production_image.bin.bz2

# create folder to handle case where arm64 is missing
sudo mkdir -p chroot/usr/lib/kola/arm64
# copy all of the latest mantle binaries into the chroot
sudo cp -t chroot/usr/lib/kola/arm64 bin/arm64/*
sudo cp -t chroot/usr/lib/kola/amd64 bin/amd64/*
sudo cp -t chroot/usr/bin bin/[b-z]*

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
enter sudo timeout --signal=SIGQUIT 12h kola run \
    --board="${BOARD}" \
    --channel="${GROUP}" \
    --parallel="${PARALLEL}" \
    --platform=qemu \
    --qemu-bios=bios-256k.bin \
    --qemu-image=/mnt/host/source/tmp/flatcar_production_image.bin \
    --tapfile="/mnt/host/source/${JOB_NAME##*/}.tap" \
    --torcx-manifest=/mnt/host/source/torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

sudo rm -rf tmp

jenkins/kola/qemu_uefi.sh — new executable file (67 lines)

#!/bin/bash
set -ex

sudo rm -rf *.tap src/scripts/_kola_temp tmp _kola_temp*

enter() {
    bin/cork enter --bind-gpg-agent=false -- "$@"
}

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

DOWNLOAD_ROOT_SDK="https://storage.googleapis.com${SDK_URL_PATH}"

bin/cork update \
    --create --downgrade-replace --verify --verify-signature --verbose \
    --sdk-url-path "${SDK_URL_PATH}" \
    --force-sync \
    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
    --manifest-name "${MANIFEST_NAME}" \
    --manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
source .repo/manifests/version.txt

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

mkdir -p tmp
bin/cork download-image \
    --cache-dir=tmp \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --platform=qemu_uefi \
    --root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
    --verify=true $verify_key
enter lbunzip2 -k -f /mnt/host/source/tmp/flatcar_production_image.bin.bz2

# create folder to handle case where arm64 is missing
sudo mkdir -p chroot/usr/lib/kola/arm64
# copy all of the latest mantle binaries into the chroot
sudo cp -t chroot/usr/lib/kola/arm64 bin/arm64/*
sudo cp -t chroot/usr/lib/kola/amd64 bin/amd64/*
sudo cp -t chroot/usr/bin bin/[b-z]*

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
enter sudo timeout --signal=SIGQUIT 14h kola run \
    --board="${BOARD}" \
    --channel="${GROUP}" \
    --parallel="${PARALLEL}" \
    --platform=qemu \
    --qemu-bios=/mnt/host/source/tmp/flatcar_production_qemu_uefi_efi_code.fd \
    --qemu-image=/mnt/host/source/tmp/flatcar_production_image.bin \
    --tapfile="/mnt/host/source/${JOB_NAME##*/}.tap" \
    --torcx-manifest=/mnt/host/source/torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob

sudo rm -rf tmp

jenkins/kola/vmware-esx.sh — new executable file (40 lines)

#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

mkdir -p tmp
bin/cork download-image \
    --cache-dir=tmp \
    --json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --platform=esx \
    --root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
    --verify=true $verify_key

trap 'bin/ore esx --esx-config-file "${VMWARE_ESX_CREDS}" remove-vms \
    --pattern "${NAME}*" || true' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
    KOLA_TESTS="*"
fi

# Delete every VM that is running because we'll use all available spots
bin/ore esx --esx-config-file "${VMWARE_ESX_CREDS}" remove-vms || true

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 2h bin/kola run \
    --basename="${NAME}" \
    --esx-config-file "${VMWARE_ESX_CREDS}" \
    --esx-ova-path tmp/flatcar_production_vmware_ova.ova \
    --parallel=4 \
    --platform=esx \
    --channel="${GROUP}" \
    --tapfile="${JOB_NAME##*/}.tap" \
    --torcx-manifest=torcx_manifest.json \
    ${KOLA_TESTS}
set +o noglob
sudo rm -rf tmp

jenkins/manifest.sh — new executable file (135 lines)

#!/bin/bash
set -ex
git -C manifest config user.name "${GIT_AUTHOR_NAME}"
git -C manifest config user.email "${GIT_AUTHOR_EMAIL}"

COREOS_OFFICIAL=0

finish() {
    local tag="$1"
    git -C manifest tag -v "${tag}"
    git -C manifest push "${BUILDS_PUSH_URL}" "refs/tags/${tag}:refs/tags/${tag}"
    tee manifest.properties << EOF
MANIFEST_URL = ${BUILDS_CLONE_URL}
MANIFEST_REF = refs/tags/${tag}
MANIFEST_NAME = release.xml
COREOS_OFFICIAL = ${COREOS_OFFICIAL:-0}
EOF
}

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# Branches are of the form remote-name/branch-name. Tags are just tag-name.
# If we have a release tag use it, for branches we need to make a tag.
if [[ "${GIT_BRANCH}" != */* ]]
then
    COREOS_OFFICIAL=1
    finish "${GIT_BRANCH}"
    exit
fi

MANIFEST_BRANCH="${GIT_BRANCH##*/}"
MANIFEST_ID="${MANIFEST_BRANCH}"
# Nightly builds use the "default" manifest from flatcar-master and have the same scripts/overlay/portage branches without a "user/" prefix.
# No further exclusions are made because nothing bad happens if other branches were used.
if [[ "${MANIFEST_NAME}" = default ]] && [[ "${MANIFEST_BRANCH}" = flatcar-master ]] && \
   [[ "${SCRIPTS_REF}" = "${OVERLAY_REF}" ]] && [[ "${OVERLAY_REF}" = "${PORTAGE_REF}" ]] && \
   [[ "${SCRIPTS_REF}" != */* ]] && [[ "${SCRIPTS_REF}" != "" ]]
then
    # Use SCRIPTS_REF but others also work since they have the same value
    MANIFEST_ID="${SCRIPTS_REF}-nightly"
fi

MANIFEST_NAME="${MANIFEST_NAME}.xml"
[[ -f "manifest/${MANIFEST_NAME}" ]]

source manifest/version.txt

if [[ "${SDK_VERSION}" == sdk-*-nightly ]]
then
    SDK_VERSION=$(curl -s -S -f -L "https://storage.googleapis.com/flatcar-jenkins/developer/sdk/amd64/${SDK_VERSION}.txt" | tee /dev/stderr)
    if [[ -z "${SDK_VERSION}" ]]
    then
        echo "No SDK found, retrigger the manifest job with default SDK_VERSION and SDK_URL_PATH values."
        exit 1
    fi
fi

export FLATCAR_BUILD_ID="${BUILD_ID_PREFIX}${MANIFEST_ID}-${BUILD_NUMBER}"
# Nightlies and dev builds have the current date as Flatcar version
if [[ "${MANIFEST_BRANCH}" = flatcar-master ]]
then
    FLATCAR_VERSION_ID="$(date '+%Y.%m.%d')"
fi

if [[ "${SDK_VERSION}" = sdk-new ]]
then
    # Use the version of the current developer build for DOWNSTREAM=all(-full), requires a seed SDK to be set
    # (releases use git tags where all this code here is not executed because the manifest
    # and version.txt should not be modified, the Alpha release version.txt has to refer to
    # the release to be build for its SDK version)
    SDK_VERSION="${FLATCAR_VERSION_ID}+${FLATCAR_BUILD_ID}"
fi

if [[ -n "${SDK_VERSION}" ]]
then
    export FLATCAR_SDK_VERSION="${SDK_VERSION}"
fi

# Ensure that each XML tag occupies exactly one line each by first removing all line breaks and then adding
# a line break after each tag.
# This way set_manifest_ref can find the right tag by matching for "/$reponame".
cat manifest/"${MANIFEST_NAME}" | tr '\n' ' ' | sed 's#/>#/>\n#g' > "manifest/${FLATCAR_BUILD_ID}.xml"

set_manifest_ref() {
    local reponame="$1"
    local reference="$2"
    # Select lines with "/$reponame" (kept as first group) and "revision" (kept as second group) and replace the value
    # of "revision" (third group, not kept) with the new reference.
    sed -i -E "s#(/$reponame.*)(revision=\")([^\"]*)#\1\2refs/heads/$reference#g" "manifest/${FLATCAR_BUILD_ID}.xml"
}

if [[ -n "${SCRIPTS_REF}" ]]
then
    set_manifest_ref scripts "${SCRIPTS_REF}"
fi
if [[ -n "${OVERLAY_REF}" ]]
then
    set_manifest_ref coreos-overlay "${OVERLAY_REF}"
fi
if [[ -n "${PORTAGE_REF}" ]]
then
    set_manifest_ref portage-stable "${PORTAGE_REF}"
fi

ln -fns "${FLATCAR_BUILD_ID}.xml" manifest/default.xml
ln -fns "${FLATCAR_BUILD_ID}.xml" manifest/release.xml

tee manifest/version.txt << EOF
FLATCAR_VERSION=${FLATCAR_VERSION_ID}+${FLATCAR_BUILD_ID}
FLATCAR_VERSION_ID=${FLATCAR_VERSION_ID}
FLATCAR_BUILD_ID=${FLATCAR_BUILD_ID}
FLATCAR_SDK_VERSION=${FLATCAR_SDK_VERSION}
EOF
# Note: You have to keep FLATCAR_VERSION in sync with the value used in the "sdk-new" case.

# Set up GPG for signing tags.
gpg --import "${GPG_SECRET_KEY_FILE}"

# Tag a development build manifest.
git -C manifest add "${FLATCAR_BUILD_ID}.xml" default.xml release.xml version.txt
git -C manifest commit \
    -m "${FLATCAR_BUILD_ID}: add build manifest" \
    -m "Based on ${GIT_URL} branch ${MANIFEST_BRANCH}" \
    -m "${BUILD_URL}"
git -C manifest tag -u "${SIGNING_USER}" -m "${FLATCAR_BUILD_ID}" "${FLATCAR_BUILD_ID}"

finish "${FLATCAR_BUILD_ID}"

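The set_manifest_ref helper above rewrites the revision attribute of one project entry in the flattened manifest. A minimal sketch of its effect on a single, hypothetical manifest line (the input line and the branch name are illustrative; only the sed expression comes from the script):

    echo '<project path="src/scripts" name="flatcar-linux/scripts" revision="refs/tags/old"/>' \
        | sed -E 's#(/scripts.*)(revision=")([^"]*)#\1\2refs/heads/my-branch#g'
    # -> <project path="src/scripts" name="flatcar-linux/scripts" revision="refs/heads/my-branch"/>
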
jenkins/packages.sh — modified, Normal file → Executable file (60 changed lines)

@@ -1,4 +1,49 @@
-#!/bin/bash -ex
+#!/bin/bash
+set -ex
+
+# The build may not be started without a tag value.
+[ -n "${MANIFEST_TAG}" ]
+
+# For developer builds that are based on a non-developer release,
+# we need the DOWNLOAD_ROOT variable to be the base path, keeping the
+# UPLOAD_ROOT variable as the developer path.
+if [[ "${RELEASE_BASE_IS_DEV}" = "false" && "${GROUP}" = "developer" && "${RELEASE_BASE}" != "" ]]; then
+    DOWNLOAD_ROOT=$(echo ${DOWNLOAD_ROOT} | sed 's,/developer,,');
+fi
+DOWNLOAD_ROOT_SDK="https://storage.googleapis.com${SDK_URL_PATH}"
+
+# Set up GPG for verifying tags.
+export GNUPGHOME="${PWD}/.gnupg"
+rm -rf "${GNUPGHOME}"
+trap 'rm -rf "${GNUPGHOME}"' EXIT
+mkdir --mode=0700 "${GNUPGHOME}"
+gpg --import verify.asc
+# Sometimes this directory is not created automatically making further private
+# key imports fail, let's create it here as a workaround
+mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"
+
+SCRIPTS_PATCH_ARG=""
+OVERLAY_PATCH_ARG=""
+PORTAGE_PATCH_ARG=""
+if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
+    SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
+fi
+if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
+    OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
+fi
+if [ "$(cat portage.patch | wc -l)" != 0 ]; then
+    PORTAGE_PATCH_ARG="--portage-patch portage.patch"
+fi
+
+bin/cork update \
+    --create --downgrade-replace --verify --verify-signature --verbose \
+    --sdk-url-path "${SDK_URL_PATH}" \
+    --force-sync \
+    ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
+    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
+    --manifest-name "${MANIFEST_NAME}" \
+    --manifest-url "${MANIFEST_URL}" \
+    -- --toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
 
 enter() {
     local verify_key=

@@ -52,3 +97,16 @@ script build_torcx_store \
     --torcx_upload_root="${TORCX_PKG_DOWNLOAD_ROOT}" \
     --tectonic_torcx_download_root="${TECTONIC_TORCX_DOWNLOAD_ROOT}" \
     --upload
+
+if [[ "${GROUP}" = "developer" ]]
+then
+    GROUP="${CHANNEL_BASE}"
+fi
+
+# Update entry for latest nightly build reference (there are no symlinks in GCS and it is also good to keep it deterministic)
+if [[ "${FLATCAR_BUILD_ID}" == *-*-nightly-* ]]
+then
+    # Extract the nightly name like "flatcar-MAJOR-nightly" from "dev-flatcar-MAJOR-nightly-NUMBER"
+    NAME=$(echo "${FLATCAR_BUILD_ID}" | grep -o "dev-.*-nightly" | cut -d - -f 2-)
+    echo "${FLATCAR_VERSION}" | bin/cork enter --bind-gpg-agent=false -- gsutil cp - "${UPLOAD_ROOT}/boards/${BOARD}/${NAME}.txt"
+fi

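The grep/cut pipeline at the end of jenkins/packages.sh extracts the nightly name from the build id, as its comment says. A quick illustration with a hypothetical FLATCAR_BUILD_ID value:

    echo "dev-flatcar-3033-nightly-42" | grep -o "dev-.*-nightly" | cut -d - -f 2-
    # -> flatcar-3033-nightly
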
jenkins/prerelease/aws.sh — new executable file (23 lines)

#!/bin/bash
set -ex

rm -f ami.properties images.json

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/plume pre-release --force \
    --debug \
    --platform=aws \
    --aws-credentials="${AWS_CREDENTIALS}" \
    --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --board="${BOARD}" \
    --channel="${CHANNEL}" \
    --version="${FLATCAR_VERSION}" \
    --write-image-list=images.json \
    $verify_key

hvm_ami_id=$(jq -r '.aws.amis[]|select(.name == "'"${AWS_REGION}"'").hvm' images.json)

tee ami.properties << EOF
HVM_AMI_ID = ${hvm_ami_id:?}
EOF

jenkins/prerelease/azure.sh — new executable file (33 lines)

#!/bin/bash
set -ex

AZURE_CATEGORY_OPT=""
if [[ "${IS_NON_SPONSORED}" == true ]]
then
    AZURE_CATEGORY_OPT="--azure-category=pro --private"
fi

rm -f images.json

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/plume pre-release --force \
    --debug \
    --platform=azure \
    --azure-profile="${AZURE_CREDENTIALS}" \
    --azure-auth="${AZURE_AUTH_CREDENTIALS}" \
    --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
    --board="${BOARD}" \
    --channel="${CHANNEL}" \
    --version="${FLATCAR_VERSION}" \
    --write-image-list=images.json \
    ${AZURE_CATEGORY_OPT} \
    $verify_key

sas_url=$(jq -r '.azure.image' images.json)
if [ "${sas_url}" = "null" ]; then
    sas_url=""
fi
tee test.properties << EOF
SAS_URL ^ ${sas_url:?}
EOF

jenkins/release.sh — new executable file (24 lines)

#!/bin/bash
set -ex
case "${CHANNEL}" in
    stable|beta)
        boards=( amd64-usr )
        ;;
    *)
        boards=( amd64-usr arm64-usr )
        ;;
esac

for board in "${boards[@]}"
do
    bin/plume release \
        --debug \
        --aws-credentials="${AWS_CREDENTIALS}" \
        --azure-profile="${AZURE_CREDENTIALS}" \
        --azure-auth="${AZURE_AUTH_CREDENTIALS}" \
        --gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
        --gce-release-key="${GOOGLE_RELEASE_CREDENTIALS}" \
        --board="${board}" \
        --channel="${CHANNEL}" \
        --version="${VERSION}"
done

jenkins/sdk.sh — modified, Normal file → Executable file (59 changed lines)

@@ -1,4 +1,53 @@
-#!/bin/bash -ex
+#!/bin/bash
+set -ex
+
+# The build may not be started without a tag value.
+[ -n "${MANIFEST_TAG}" ]
+
+# Catalyst leaves things chowned as root.
+[ -d .cache/sdks ] && sudo chown -R "$USER" .cache/sdks
+
+# Set up GPG for verifying tags.
+export GNUPGHOME="${PWD}/.gnupg"
+rm -rf "${GNUPGHOME}"
+trap 'rm -rf "${GNUPGHOME}"' EXIT
+mkdir --mode=0700 "${GNUPGHOME}"
+gpg --import verify.asc
+# Sometimes this directory is not created automatically making further private
+# key imports fail, let's create it here as a workaround
+mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"
+
+if [[ "${SEED_SDK_VERSION}" == alpha ]]
+then
+    SEED_SDK_VERSION=$(curl -s -S -f -L "https://alpha.release.flatcar-linux.net/amd64-usr/current/version.txt" | grep -m 1 FLATCAR_SDK_VERSION= | cut -d = -f 2- | tee /dev/stderr)
+    if [[ -z "${SEED_SDK_VERSION}" ]]
+    then
+        echo "Unexpected: Alpha release SDK version not found"
+        exit 1
+    fi
+fi
+
+DOWNLOAD_ROOT_SDK=https://storage.googleapis.com/flatcar-jenkins/sdk
+
+# We do not use a nightly SDK as seed for bootstrapping because the next major Alpha SDK release would also have to use the last published Alpha release SDK as seed.
+# Also, we don't want compiler bugs to propagate from one nightly SDK to the next even though the commit in question was reverted.
+# Having a clear bootstrap path is our last safety line before insanity for that kind of bugs, and is a requirement for reproducibility and security.
+# Fore more info, read Ken Thompson's Turing Award Lecture "Reflections on Trusting Trust".
+# In rare cases this will mean that a huge compiler update has to be split because first a released SDK with a newer compiler is needed to compile an even newer compiler
+# (or linker, libc etc). For experiments one can download the nightly/developer SDK and start the bootstrap from it locally but exposing this functionality in Jenkins would
+# cause more confusion than helping to understand what the requirements are to get SDK changes to a releasable state.
+
+bin/cork update \
+    --create --downgrade-replace --verify --verify-signature --verbose \
+    --sdk-version "${SEED_SDK_VERSION}" \
+    --force-sync \
+    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
+    --manifest-name "${MANIFEST_NAME}" \
+    --manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
+
+if [[ ${FULL_BUILD} == "false" ]]; then
+    export FORCE_STAGES="stage4"
+fi
+
 enter() {
     bin/cork enter --bind-gpg-agent=false -- "$@"

@@ -23,3 +72,11 @@ enter sudo \
     --stage1_overlay_ref="${STAGE1_OVERLAY_REF}" \
     --stage1_portage_ref="${STAGE1_PORTAGE_REF}" \
     --upload
+
+# Update entry for latest nightly build reference (there are no symlinks in GCS and it is also good to keep it deterministic)
+if [[ "${FLATCAR_BUILD_ID}" == *-*-nightly-* ]]
+then
+    # Extract the nightly name like "flatcar-MAJOR-nightly" from "dev-flatcar-MAJOR-nightly-NUMBER"
+    NAME=$(echo "${FLATCAR_BUILD_ID}" | grep -o "dev-.*-nightly" | cut -d - -f 2-)
+    echo "${FLATCAR_VERSION}" | enter gsutil cp - "${UPLOAD_ROOT}/sdk/amd64/sdk-${NAME}.txt"
+fi

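The seed-SDK lookup added to jenkins/sdk.sh takes the FLATCAR_SDK_VERSION line from the published Alpha version.txt and keeps everything after the equals sign. A quick illustration with hypothetical version.txt content (the keys and numbers below are made up for the example):

    printf 'FLATCAR_BUILD=9999\nFLATCAR_SDK_VERSION=2905.0.0\n' \
        | grep -m 1 FLATCAR_SDK_VERSION= | cut -d = -f 2-
    # -> 2905.0.0
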
jenkins/systemd-run-wrap.sh — new executable file (32 lines)

#!/bin/bash
set -euo pipefail
# note: to make sure you forward the whole env, you can first run 'source <(export)' before starting this script

# Add /opt/bin explicitly because the lbzcat binary is there on the Jenkins workers
export PATH="$PATH:/opt/bin"

# Use a system session unit because the user session may not be set up correctly in a CI env
ARGS=("--system" "--collect" "--same-dir" "--pipe" "--wait" "--property=User=$USER" "--property=Group=$USER")
# Extra "sh -c" is needed to only export the exported variables
for VARNAME in $(sh -c 'compgen -v'); do
    set +u
    VAL="${!VARNAME}"
    set -u
    ARGS+=("--setenv" "${VARNAME}=${VAL}")
done

UNITNAME="run-$(date '+%s')"

# The --pipe option does not stop the unit when the systemd-run process is killed, we have to do this through a trap
# (and --pty as alternative doesn't behave well because it leads to processes expecting stdin when there is none)
function cancel() {
    echo
    echo "Terminating"
    sudo systemctl stop "${UNITNAME}"
    exit 1
}
trap cancel INT

ARGS+=("--unit=${UNITNAME}")

sudo systemd-run "${ARGS[@]}" "$@"

jenkins/toolchains.sh — modified, Normal file → Executable file (43 changed lines)

@@ -1,4 +1,45 @@
-#!/bin/bash -ex
+#!/bin/bash
+set -ex
+
+# The build may not be started without a tag value.
+[ -n "${MANIFEST_TAG}" ]
+
+# Catalyst leaves things chowned as root.
+[ -d .cache/sdks ] && sudo chown -R "$USER" .cache/sdks
+
+# Set up GPG for verifying tags.
+export GNUPGHOME="${PWD}/.gnupg"
+rm -rf "${GNUPGHOME}"
+trap 'rm -rf "${GNUPGHOME}"' EXIT
+mkdir --mode=0700 "${GNUPGHOME}"
+gpg --import verify.asc
+# Sometimes this directory is not created automatically making further private
+# key imports fail, let's create it here as a workaround
+mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"
+
+DOWNLOAD_ROOT_SDK="https://storage.googleapis.com${SDK_URL_PATH}"
+
+SCRIPTS_PATCH_ARG=""
+OVERLAY_PATCH_ARG=""
+PORTAGE_PATCH_ARG=""
+if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
+    SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
+fi
+if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
+    OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
+fi
+if [ "$(cat portage.patch | wc -l)" != 0 ]; then
+    PORTAGE_PATCH_ARG="--portage-patch portage.patch"
+fi
+
+bin/cork update \
+    --create --downgrade-replace --verify --verify-signature --verbose \
+    --sdk-url-path "${SDK_URL_PATH}" \
+    --force-sync \
+    ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
+    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
+    --manifest-name "${MANIFEST_NAME}" \
+    --manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
+
 enter() {
     bin/cork enter --bind-gpg-agent=false -- "$@"

jenkins/vm.sh → jenkins/vms.sh — renamed and modified, Normal file → Executable file (40 changed lines)

@@ -1,4 +1,42 @@
-#!/bin/bash -ex
+#!/bin/bash
+set -ex
+
+# The build may not be started without a tag value.
+[ -n "${MANIFEST_TAG}" ]
+
+# Set up GPG for verifying tags.
+export GNUPGHOME="${PWD}/.gnupg"
+rm -rf "${GNUPGHOME}"
+trap 'rm -rf "${GNUPGHOME}"' EXIT
+mkdir --mode=0700 "${GNUPGHOME}"
+gpg --import verify.asc
+# Sometimes this directory is not created automatically making further private
+# key imports fail, let's create it here as a workaround
+mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"
+
+DOWNLOAD_ROOT_SDK="https://storage.googleapis.com${SDK_URL_PATH}"
+
+SCRIPTS_PATCH_ARG=""
+OVERLAY_PATCH_ARG=""
+PORTAGE_PATCH_ARG=""
+if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
+    SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
+fi
+if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
+    OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
+fi
+if [ "$(cat portage.patch | wc -l)" != 0 ]; then
+    PORTAGE_PATCH_ARG="--portage-patch portage.patch"
+fi
+
+bin/cork update \
+    --create --downgrade-replace --verify --verify-signature --verbose \
+    --sdk-url-path "${SDK_URL_PATH}" \
+    --force-sync \
+    ${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
+    --manifest-branch "refs/tags/${MANIFEST_TAG}" \
+    --manifest-name "${MANIFEST_NAME}" \
+    --manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
 
 # Clear out old images.
 sudo rm -rf chroot/build tmp