Mirror of https://github.com/flatcar/scripts.git, synced 2025-08-06 04:26:59 +02:00
Delete (almost) all of the jenkins directory

This has been superseded by code in the ci-automation directory.

Signed-off-by: James Le Cuirot <jlecuirot@microsoft.com>

parent 33672f7acf
commit 363f2321ab
@@ -1 +0,0 @@
This folder is unused.
@@ -1,34 +0,0 @@
ami
ami_vmdk
azure
azure_gen2
gce
iso
pxe
qemu
qemu_uefi
brightbox
cloudsigma
cloudstack
cloudstack_vhd
digitalocean
exoscale
hyperv
niftycloud
openstack
openstack_mini
packet
parallels
rackspace
rackspace_onmetal
rackspace_vhd
vagrant
vagrant_parallels
vagrant_virtualbox
vagrant_vmware_fusion
virtualbox
vmware
vmware_insecure
vmware_ova
vmware_raw
xen
@@ -1,7 +0,0 @@
ami_vmdk
azure_gen2
openstack
openstack_mini
packet
pxe
qemu_uefi
@@ -1,169 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# since /flatcar-jenkins/developer/sdk starts with a / we only use one
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
--verify --verify-signature --replace \
--sdk-url-path "${SDK_URL_PATH}" \
--json-key "${GS_DEVEL_CREDS}" \
${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
--manifest-branch "refs/tags/${MANIFEST_TAG}" \
--manifest-name "${MANIFEST_NAME}" \
--manifest-url "${MANIFEST_URL}" \
--sdk-url=storage.googleapis.com

# Clear out old images.
sudo rm -rf chroot/build src/build torcx

enter() {
local verify_key=
# Run in a subshell to clean some gangue files on exit without
# possibly clobbering the global EXIT trap.
(
trap 'sudo rm -f chroot/etc/portage/gangue.*' EXIT
[ -s verify.asc ] &&
sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
verify_key=--verify-key=/etc/portage/gangue.asc
sudo ln -f "${GS_DEVEL_CREDS}" chroot/etc/portage/gangue.json
bin/cork enter --bind-gpg-agent=false -- env \
FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \
FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
{FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
--json-key=/etc/portage/gangue.json $verify_key \
"'"${URI}" "${DISTDIR}/${FILE}"' \
"$@"
)
}

script() {
enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

script update_chroot \
--toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"

script setup_board \
--board="${BOARD}" \
--getbinpkgver="${FLATCAR_VERSION}" \
--regen_configs_only

if [ "x${COREOS_OFFICIAL}" == x1 ]
then
script set_official --board="${BOARD}" --official
else
script set_official --board="${BOARD}" --noofficial
fi

# Retrieve this version's torcx manifest
mkdir -p torcx/pkgs
enter gsutil cp -r \
"${DOWNLOAD_ROOT}/torcx/manifests/${BOARD}/${FLATCAR_VERSION}/torcx_manifest.json"{,.sig} \
/mnt/host/source/torcx/
gpg --verify torcx/torcx_manifest.json.sig

BASH_SYNTAX_ERROR_WORKAROUND=$(mktemp)
exec {keep_open}<>"${BASH_SYNTAX_ERROR_WORKAROUND}"
rm "${BASH_SYNTAX_ERROR_WORKAROUND}"
jq -r '.value.packages[] | . as $p | .name as $n | $p.versions[] | [.casDigest, .hash] | join(" ") | [$n, .] | join(" ")' "torcx/torcx_manifest.json" > "/proc/$$/fd/${keep_open}"
# Download all cas references from the manifest and verify their checksums
# TODO: technically we can skip ones that don't have a 'path' since they're not
# included in the image.
while read name digest hash
do
mkdir -p "torcx/pkgs/${BOARD}/${name}/${digest}"
enter gsutil cp -r "${TORCX_PKG_DOWNLOAD_ROOT}/pkgs/${BOARD}/${name}/${digest}" \
"/mnt/host/source/torcx/pkgs/${BOARD}/${name}/"
downloaded_hash=$(sha512sum "torcx/pkgs/${BOARD}/${name}/${digest}/"*.torcx.tgz | awk '{print $1}')
if [[ "sha512-${downloaded_hash}" != "${hash}" ]]
then
echo "Torcx package had wrong hash: ${downloaded_hash} instead of ${hash}"
exit 1
fi
done < "/proc/$$/fd/${keep_open}"
# This was "done < <(jq ...)" but it suddenly gave a syntax error with bash 4 when run with systemd-run-wrap.sh

script build_image \
--board="${BOARD}" \
--group="${GROUP}" \
--getbinpkg \
--getbinpkgver="${FLATCAR_VERSION}" \
--sign="${SIGNING_USER}" \
--sign_digests="${SIGNING_USER}" \
--torcx_manifest=/mnt/host/source/torcx/torcx_manifest.json \
--torcx_root=/mnt/host/source/torcx/ \
--upload_root="${UPLOAD_ROOT}" \
--upload prodtar container

set +x
# Don't fail the whole job
set +e
echo "==================================================================="
echo
export BOARD_A="${BOARD}"
export BOARD_B="${BOARD}"
if [ "${GROUP}" != "developer" ]; then
export CHANNEL_A="${GROUP}"
else
export CHANNEL_A="${CHANNEL_BASE}"
fi

export VERSION_A=$(curl -s -S -f -L "https://${CHANNEL_A}.release.flatcar-linux.net/${BOARD}/current/version.txt" | grep -m 1 "FLATCAR_VERSION=" | cut -d "=" -f 2)

if [ "${GROUP}" = "developer" ]; then
export CHANNEL_B="developer"
export MODE_B="/developer/"
else
export CHANNEL_B="${GROUP}"
fi
echo "Image differences compared to ${CHANNEL_A} ${VERSION_A}:"
rm -f package-diff
curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 "https://raw.githubusercontent.com/flatcar-linux/flatcar-build-scripts/master/package-diff"
chmod +x package-diff
echo "Package updates, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_packages.txt ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
echo "Image file changes, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_contents.txt FILESONLY=1 CUTKERNEL=1 ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
echo "Image kernel config changes, compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_kernel_config.txt ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
echo "Image file size change (includes /boot, /usr and the default rootfs partitions), compared to ${CHANNEL_A} ${VERSION_A}:"
FILE=flatcar_production_image_contents.txt CALCSIZE=1 ./package-diff "${VERSION_A}" "${FLATCAR_VERSION}"
echo
BASE_PATH="https://bucket.release.flatcar-linux.net/$(echo $UPLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
echo "Image URL: ${BASE_PATH}/flatcar_production_image.bin.bz2"
@@ -1,4 +0,0 @@
FROM debian:11

RUN apt-get update && \
apt-get install -y qemu-system-aarch64 qemu-efi-aarch64 lbzip2 sudo dnsmasq gnupg2 git curl iptables
@@ -1,93 +0,0 @@
#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

NAME="jenkins-${JOB_NAME##*/}-${BUILD_NUMBER}"

if [[ "${AWS_INSTANCE_TYPE}" != "" ]]; then
instance_type="${AWS_INSTANCE_TYPE}"
elif [[ "${BOARD}" == "arm64-usr" ]]; then
instance_type="a1.large"
elif [[ "${BOARD}" == "amd64-usr" ]]; then
instance_type="t3.small"
fi

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
OFFER="basic"
fi

# Append the offer as oem suffix.
if [[ "${OFFER}" != "basic" ]]; then
OEM_SUFFIX="_${OFFER}"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
KOLA_TESTS="*"
fi

if [[ "${AWS_AMI_ID}" == "" ]]; then
[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=
mkdir -p tmp
bin/cork download-image \
--cache-dir=tmp \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--platform="aws${OEM_SUFFIX}" \
--root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
--sanity-check=false --verify=true $verify_key
bunzip2 "tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk.bz2"
BUCKET="flatcar-kola-ami-import-${AWS_REGION}"
trap 'bin/ore -d aws delete --region="${AWS_REGION}" --name="${NAME}" --ami-name="${NAME}" --file="tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk" --bucket "s3://${BUCKET}/${BOARD}/"; rm -r tmp/' EXIT
bin/ore aws initialize --region="${AWS_REGION}" --bucket "${BUCKET}"
AWS_AMI_ID=$(bin/ore aws upload --force --region="${AWS_REGION}" --name=${NAME} --ami-name="${NAME}" --ami-description="Flatcar Test ${NAME}" --file="tmp/flatcar_production_ami_vmdk${OEM_SUFFIX}_image.vmdk" --bucket "s3://${BUCKET}/${BOARD}/" | jq -r .HVM)
echo "Created new AMI ${AWS_AMI_ID} (will be removed after testing)"
fi

# Run the cl.internet test on multiple machine types only if it should run in general
cl_internet_included="$(set -o noglob; bin/kola list --platform=aws --filter ${KOLA_TESTS} | { grep cl.internet || true ; } )"
if [[ "${BOARD}" == "amd64-usr" ]] && [[ "${cl_internet_included}" != "" ]]; then
for INSTANCE in m4.2xlarge; do
(
set +x
OUTPUT=$(timeout --signal=SIGQUIT 6h bin/kola run \
--parallel=8 \
--basename="${NAME}" \
--board="${BOARD}" \
--aws-ami="${AWS_AMI_ID}" \
--aws-region="${AWS_REGION}" \
--aws-type="${INSTANCE}" \
--aws-iam-profile="${AWS_IAM_PROFILE}" \
--platform=aws \
--channel="${GROUP}" \
--offering="${OFFER}" \
--tapfile="${JOB_NAME##*/}_validate_${INSTANCE}.tap" \
--torcx-manifest=torcx_manifest.json \
cl.internet 2>&1 || true)
echo "=== START $INSTANCE ==="
echo "${OUTPUT}" | sed "s/^/${INSTANCE}: /g"
echo "=== END $INSTANCE ==="
) &
done
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 6h bin/kola run \
--parallel=8 \
--basename="${NAME}" \
--board="${BOARD}" \
--aws-ami="${AWS_AMI_ID}" \
--aws-region="${AWS_REGION}" \
--aws-type="${instance_type}" \
--aws-iam-profile="${AWS_IAM_PROFILE}" \
--platform=aws \
--channel="${GROUP}" \
--offering="${OFFER}" \
--tapfile="${JOB_NAME##*/}.tap" \
--torcx-manifest=torcx_manifest.json \
${KOLA_TESTS}
set +o noglob

# wait for the cl.internet test results
wait
@@ -1,55 +0,0 @@
#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

NAME="jenkins-${JOB_NAME##*/}-${BUILD_NUMBER}"

if [[ "${BOARD}" == "arm64-usr" ]]; then
if [[ "${AZURE_HYPER_V_GENERATION}" != "V2" ]]; then
echo "Unsupported combination"
exit 1
fi
AZURE_USE_GALLERY="--azure-use-gallery"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
KOLA_TESTS="*"
fi

if [[ "${AZURE_MACHINE_SIZE}" != "" ]]; then
AZURE_MACHINE_SIZE_OPT="--azure-size=${AZURE_MACHINE_SIZE}"
fi

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
OFFER="basic"
fi

if [ "${BLOB_URL}" = "" ]; then
exit 0
fi

# Do not expand the kola test patterns globs
set -o noglob
# Align timeout with ore azure gc --duration parameter
timeout --signal=SIGQUIT 6h bin/kola run \
--parallel="${PARALLEL}" \
--basename="${NAME}" \
--board="${BOARD}" \
--channel="${GROUP}" \
--platform=azure \
--offering="${OFFER}" \
--azure-blob-url="${BLOB_URL}" \
--azure-location="${LOCATION}" \
--azure-profile="${AZURE_CREDENTIALS}" \
--azure-auth="${AZURE_AUTH_CREDENTIALS}" \
--tapfile="${JOB_NAME##*/}.tap" \
--torcx-manifest=torcx_manifest.json \
${AZURE_USE_GALLERY} \
${AZURE_MACHINE_SIZE_OPT} \
${AZURE_HYPER_V_GENERATION:+--azure-hyper-v-generation=${AZURE_HYPER_V_GENERATION}} \
${AZURE_VNET_SUBNET_NAME:+--azure-vnet-subnet-name=${AZURE_VNET_SUBNET_NAME}} \
${AZURE_USE_PRIVATE_IPS:+--azure-use-private-ips=${AZURE_USE_PRIVATE_IPS}} \
${KOLA_TESTS}
set +o noglob
@@ -1,47 +0,0 @@
#!/bin/bash
set -ex

sudo rm -f flatcar_developer_container.bin*
trap 'sudo rm -f flatcar_developer_container.bin*' EXIT

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/gangue get \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--verify=true $verify_key \
"${DOWNLOAD_ROOT}/boards/${BOARD}/${VERSION}/flatcar_production_image_kernel_config.txt"

bin/gangue get \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--verify=true $verify_key \
"${DOWNLOAD_ROOT}/boards/${BOARD}/${VERSION}/flatcar_developer_container.bin.bz2"
bunzip2 flatcar_developer_container.bin.bz2

if [[ "$(systemd-nspawn --version | grep 'systemd 241')" = "" ]]
then
PIPEARG="--pipe"
else
# TODO: Remove this case once Flatcar >=2592 is used on all nodes
PIPEARG=""
fi

sudo systemd-nspawn $PIPEARG \
--setenv=PORTAGE_BINHOST="${PORTAGE_BINHOST}" \
--bind-ro=/lib/modules \
--bind-ro="$PWD/flatcar_production_image_kernel_config.txt:/boot/config" \
--bind-ro="${GOOGLE_APPLICATION_CREDENTIALS}:/opt/credentials.json" \
--bind-ro="$PWD/verify.asc:/opt/verify.asc" \
--bind-ro="$PWD/bin/gangue:/opt/bin/gangue" \
--image=flatcar_developer_container.bin \
--machine=flatcar-developer-container-$(uuidgen) \
--tmpfs=/usr/src \
--tmpfs=/var/tmp \
/bin/bash -eux << 'EOF'
export PORTAGE_BINHOST="${PORTAGE_BINHOST}"
export {FETCH,RESUME}COMMAND_GS="/opt/bin/gangue get --json-key=/opt/credentials.json --verify=true /opt/verify.asc \"\${URI}\" \"\${DISTDIR}/\${FILE}\""
emerge-gitclone
. /usr/share/coreos/release
emerge -gv coreos-sources
ln -fns /boot/config /usr/src/linux/.config
exec make -C /usr/src/linux -j"$(nproc)" modules_prepare V=1
EOF
@@ -1,47 +0,0 @@
#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

set -o pipefail

if [[ "${DOWNLOAD_ROOT}" == gs://flatcar-jenkins-private/* ]]; then
echo "Fetching google/cloud-sdk"
docker pull google/cloud-sdk > /dev/null
BUCKET_PATH="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}/flatcar_production_digitalocean_image.bin.bz2"
IMAGE_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH} | grep -o 'https.*'")"
else
BASE_URL="https://bucket.release.flatcar-linux.net/$(echo $DOWNLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
IMAGE_URL="${BASE_URL}/flatcar_production_digitalocean_image.bin.bz2"
fi

bin/ore do create-image \
--config-file="${DIGITALOCEAN_CREDS}" \
--region="${DO_REGION}" \
--name="${NAME}" \
--url="${IMAGE_URL}"

trap 'bin/ore do delete-image \
--name="${NAME}" \
--config-file="${DIGITALOCEAN_CREDS}"' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 4h bin/kola run \
--do-size=${DO_MACHINE_SIZE} \
--do-region=${DO_REGION} \
--basename="${NAME}" \
--do-config-file="${DIGITALOCEAN_CREDS}" \
--do-image="${NAME}" \
--parallel=8 \
--platform=do \
--channel="${GROUP}" \
--tapfile="${JOB_NAME##*/}.tap" \
--torcx-manifest=torcx_manifest.json \
${KOLA_TESTS}
set +o noglob
@@ -1,52 +0,0 @@
#!/bin/bash
set -ex

rm -rf *.tap _kola_temp*

# If the OFFER is empty, it should be treated as the basic offering.
if [[ "${OFFER}" == "" ]]; then
OFFER="basic"
fi

# Append the offer as oem suffix.
if [[ "${OFFER}" != "basic" ]]; then
OEM_SUFFIX="_${OFFER}"
fi

# Create a name that includes the OFFER,
# but replace _ with -, as gcloud doesn't like it otherwise.
OEMNAME="${OFFER}-${BUILD_NUMBER}"
NAME=${OEMNAME//_/-}

bin/ore gcloud create-image \
--board="${BOARD}" \
--family="${NAME}" \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--source-root="${DOWNLOAD_ROOT}/boards" \
--source-name=flatcar_production_gce${OEM_SUFFIX}.tar.gz \
--version="${FLATCAR_VERSION}"

GCE_NAME="${NAME//[+.]/-}-${FLATCAR_VERSION//[+.]/-}"

trap 'bin/ore gcloud delete-images \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
"${GCE_NAME}"' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
KOLA_TESTS="*"
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 6h bin/kola run \
--basename="${NAME}" \
--gce-image="${GCE_NAME}" \
--gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--gce-machinetype="${GCE_MACHINE_TYPE}" \
--parallel=4 \
--platform=gce \
--channel="${GROUP}" \
--tapfile="${JOB_NAME##*/}.tap" \
--torcx-manifest=torcx_manifest.json \
${KOLA_TESTS}
set +o noglob
@@ -1,95 +0,0 @@
#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

# same as the GC timeout
timeout=6h

set -o pipefail

# Construct the URLs of the image to be used during tests.
if [[ "${DOWNLOAD_ROOT}" == gs://flatcar-jenkins-private/* ]]; then
echo "Fetching google/cloud-sdk"
docker pull google/cloud-sdk > /dev/null
BUCKET_PATH="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}"
IMAGE_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_packet_image.bin.bz2 | grep -o 'https.*'")"
KERNEL_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_pxe.vmlinuz | grep -o 'https.*'")"
CPIO_URL="$(docker run --rm --net=host -v "${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}" google/cloud-sdk sh -c "python3 -m pip install pyopenssl > /dev/null; gsutil signurl -d 7d -r us ${GOOGLE_APPLICATION_CREDENTIALS} ${BUCKET_PATH}/flatcar_production_pxe_image.cpio.gz | grep -o 'https.*'")"
else
BASE_PATH="bucket.release.flatcar-linux.net/$(echo $DOWNLOAD_ROOT | sed 's|gs://||g')/boards/${BOARD}/${FLATCAR_VERSION}"
IMAGE_URL="https://${BASE_PATH}/flatcar_production_packet_image.bin.bz2"
KERNEL_URL="https://${BASE_PATH}/flatcar_production_pxe.vmlinuz"
CPIO_URL="https://${BASE_PATH}/flatcar_production_pxe_image.cpio.gz"
fi

if [[ "${KOLA_TESTS}" == "" ]]; then
KOLA_TESTS="*"
fi

# Equinix Metal ARM server are not yet hourly available in the default `sv15` region
# so we override the `PACKET_REGION` to `DC`. ARM servers are available in metro
# either DA (Dallas) or DC (Washington), but DC has more servers available.
# See also https://metal.equinix.com/developers/docs/locations/capacity/.
# We do not override `PACKET_REGION` for both board on top level because we need to keep proximity
# for PXE booting.
if [[ "${BOARD}" == "arm64-usr" ]]; then
PACKET_REGION="DC"
fi

# Run the cl.internet test on multiple machine types only if it should run in general
cl_internet_included="$(set -o noglob; bin/kola list --platform=packet --filter ${KOLA_TESTS} | { grep cl.internet || true ; } )"
if [[ "${BOARD}" == "amd64-usr" ]] && [[ "${cl_internet_included}" != "" ]]; then
for INSTANCE in m3.small.x86 c3.medium.x86 m3.large.x86 s3.xlarge.x86 n2.xlarge.x86; do
(
set +x
OUTPUT=$(timeout --signal=SIGQUIT "${timeout}" bin/kola run \
--basename="${NAME}" \
--board="${BOARD}" \
--channel="${GROUP}" \
--gce-json-key="${UPLOAD_CREDS}" \
--packet-api-key="${PACKET_API_KEY}" \
--packet-image-url="${IMAGE_URL}" \
--packet-installer-image-kernel-url="${KERNEL_URL}" \
--packet-installer-image-cpio-url="${CPIO_URL}" \
--packet-project="${PACKET_PROJECT}" \
--packet-storage-url="${UPLOAD_ROOT}/mantle/packet" \
--packet-plan="${INSTANCE}" \
--equinixmetal-metro="${PACKET_REGION}" \
--parallel="${PARALLEL_TESTS}" \
--platform=packet \
--tapfile="${JOB_NAME##*/}_validate_${INSTANCE}.tap" \
--torcx-manifest=torcx_manifest.json \
cl.internet 2>&1 || true)
echo "=== START $INSTANCE ==="
echo "${OUTPUT}" | sed "s/^/${INSTANCE}: /g"
echo "=== END $INSTANCE ==="
) &
done
fi

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT "${timeout}" bin/kola run \
--basename="${NAME}" \
--board="${BOARD}" \
--channel="${GROUP}" \
--gce-json-key="${UPLOAD_CREDS}" \
--packet-api-key="${PACKET_API_KEY}" \
--packet-image-url="${IMAGE_URL}" \
--packet-installer-image-kernel-url="${KERNEL_URL}" \
--packet-installer-image-cpio-url="${CPIO_URL}" \
--packet-project="${PACKET_PROJECT}" \
--packet-storage-url="${UPLOAD_ROOT}/mantle/packet" \
--packet-plan="${PACKET_MACHINE_TYPE}" \
--equinixmetal-metro="${PACKET_REGION}" \
--parallel="${PARALLEL_TESTS}" \
--platform=packet \
--tapfile="${JOB_NAME##*/}.tap" \
--torcx-manifest=torcx_manifest.json \
${KOLA_TESTS}
set +o noglob

# wait for the cl.internet test results
wait
@@ -1,5 +0,0 @@
#!/bin/bash
set -ex

SCRIPTFOLDER="$(dirname "$(readlink -f "$0")")"
"${SCRIPTFOLDER}/qemu_common.sh" qemu
@@ -1,151 +0,0 @@
#!/bin/bash
set -ex

PLATFORM="$1"
if [ "${PLATFORM}" = qemu ]; then
TIMEOUT="12h"
BIOS="bios-256k.bin"
elif [ "${PLATFORM}" = qemu_uefi ]; then
TIMEOUT="14h"
BIOS="/mnt/host/source/tmp/flatcar_production_qemu_uefi_efi_code.qcow2"
else
echo "Unknown platform: \"${PLATFORM}\""
fi

native_arm64() {
[[ "${NATIVE_ARM64}" == true ]]
}

sudo rm -rf *.tap src/scripts/_kola_temp tmp _kola_temp* _tmp

if native_arm64 ; then
# for kola reflinking
sudo rm -rf /var/tmp
mkdir -p _tmp
chmod 1777 _tmp
ln -s "$PWD/_tmp" /var/tmp
# use arm64 mantle bins
rm -rf bin
mv bin.arm64 bin
# simulate SDK folder structure
mkdir -p src
ln -s .. src/scripts
sudo rm -f chroot
ln -s / chroot

enter() {
"$@"
}
else
enter() {
bin/cork enter --bind-gpg-agent=false -- "$@"
}
fi

script() {
enter "/mnt/host/source/src/scripts/$@"
}

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# since /flatcar-jenkins/developer/sdk starts with a / we only use one
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

if native_arm64 ; then
mkdir -p .repo/
if [ ! -e .repo/manifests ]; then
mkdir -p ~/.ssh
ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
git clone "${MANIFEST_URL}" .repo/manifests
fi
git -C .repo/manifests tag -v "${MANIFEST_TAG}"
git -C .repo/manifests checkout "${MANIFEST_TAG}"
else
bin/cork create \
--verify --verify-signature --replace \
--sdk-url-path "${SDK_URL_PATH}" \
--json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
--manifest-branch "refs/tags/${MANIFEST_TAG}" \
--manifest-name "${MANIFEST_NAME}" \
--sdk-url storage.googleapis.com \
--manifest-url "${MANIFEST_URL}"
fi

source .repo/manifests/version.txt

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

if ! native_arm64; then
script update_chroot \
--toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"
fi

mkdir -p tmp
bin/cork download-image \
--cache-dir=tmp \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--platform="${PLATFORM}" \
--root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
--verify=true $verify_key
enter lbunzip2 -k -f /mnt/host/source/tmp/flatcar_production_image.bin.bz2

# create folder to handle case where arm64 is missing
sudo mkdir -p chroot/usr/lib/kola/{arm64,amd64}
# copy all of the latest mantle binaries into the chroot
sudo cp -t chroot/usr/lib/kola/arm64 bin/arm64/*
sudo cp -t chroot/usr/lib/kola/amd64 bin/amd64/*
sudo cp -t chroot/usr/bin bin/[b-z]*

if [[ "${KOLA_TESTS}" == "" ]]; then
KOLA_TESTS="*"
fi

rm -f flatcar_test_update.gz
bin/gangue get \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--verify=true $verify_key \
"${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}/flatcar_test_update.gz"
mv flatcar_test_update.gz tmp/

if [ "${KOLA_TESTS}" = "*" ] || [ "$(echo "${KOLA_TESTS}" | grep 'cl.update.payload')" != "" ]; then
# First test to update from the previous release, this is done before running the real kola suite so that the qemu-latest symlink still points to the full run
rm -f flatcar_production_image.bin.bz2
curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 "https://${GROUP}.release.flatcar-linux.net/${BOARD}/current/flatcar_production_image.bin.bz2"
mv flatcar_production_image.bin.bz2 tmp/flatcar_production_image_previous.bin.bz2
enter lbunzip2 -k -f /mnt/host/source/tmp/flatcar_production_image_previous.bin.bz2
enter sudo timeout --signal=SIGQUIT "${TIMEOUT}" kola run \
--board="${BOARD}" \
--channel="${GROUP}" \
--parallel="${PARALLEL}" \
--platform=qemu \
--qemu-bios="${BIOS}" \
--qemu-image=/mnt/host/source/tmp/flatcar_production_image_previous.bin \
--tapfile="/mnt/host/source/${JOB_NAME##*/}_update_from_previous_release.tap" \
--torcx-manifest=/mnt/host/source/torcx_manifest.json \
--update-payload=/mnt/host/source/tmp/flatcar_test_update.gz \
cl.update.payload || true
fi

# Do not expand the kola test patterns globs
set -o noglob
enter sudo timeout --signal=SIGQUIT "${TIMEOUT}" kola run \
--board="${BOARD}" \
--channel="${GROUP}" \
--parallel="${PARALLEL}" \
--platform=qemu \
--qemu-bios="${BIOS}" \
--qemu-image=/mnt/host/source/tmp/flatcar_production_image.bin \
--tapfile="/mnt/host/source/${JOB_NAME##*/}.tap" \
--torcx-manifest=/mnt/host/source/torcx_manifest.json \
${KOLA_TESTS}
set +o noglob

sudo rm -rf tmp
@@ -1,9 +0,0 @@
#!/bin/bash
set -ex

SCRIPTFOLDER="$(dirname "$(readlink -f "$0")")"
if [[ "$NATIVE_ARM64" == true ]]; then
"${SCRIPTFOLDER}/qemu_uefi_arm64.sh" qemu_uefi
else
"${SCRIPTFOLDER}/qemu_common.sh" qemu_uefi
fi
@@ -1,40 +0,0 @@
#!/bin/bash

set -ex

SCRIPTFOLDER="$(dirname "$(readlink -f "$0")")"
# strip $PWD prefix so that we can access the path relative to the container working directory
SCRIPTFOLDER=${SCRIPTFOLDER#$PWD/}

DOCKER_IMG=ghcr.io/kinvolk/kola-test-runner:latest

envarg=()
envflags=(
SSH_AUTH_SOCK
BOARD
MANIFEST_URL
SDK_URL_PATH
CHANNEL_BASE
GROUP
KOLA_TESTS
MANIFEST_TAG
DOWNLOAD_ROOT
PARALLEL
GOOGLE_APPLICATION_CREDENTIALS
NATIVE_ARM64
)
for envvar in ${envflags[@]}; do
envarg+=( -e "${envvar}=${!envvar}" )
done

docker pull ${DOCKER_IMG}
exec docker run --privileged \
--rm \
-v /dev:/dev \
-w /mnt/host/source \
-v ${PWD}:/mnt/host/source \
-v ${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS} \
${SSH_AUTH_SOCK:+-v ${SSH_AUTH_SOCK}:${SSH_AUTH_SOCK}} \
"${envarg[@]}" \
${DOCKER_IMG} \
"${SCRIPTFOLDER}/qemu_common.sh" qemu_uefi
@@ -1,40 +0,0 @@
#!/bin/bash
set -ex

# JOB_NAME will not fit within the character limit
NAME="jenkins-${BUILD_NUMBER}"

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

mkdir -p tmp
bin/cork download-image \
--cache-dir=tmp \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--platform=esx \
--root="${DOWNLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
--verify=true $verify_key

trap 'bin/ore esx --esx-config-file "${VMWARE_ESX_CREDS}" remove-vms \
--pattern "${NAME}*" || true' EXIT

if [[ "${KOLA_TESTS}" == "" ]]; then
KOLA_TESTS="*"
fi

# Delete every VM that is running because we'll use all available spots
bin/ore esx --esx-config-file "${VMWARE_ESX_CREDS}" remove-vms || true

# Do not expand the kola test patterns globs
set -o noglob
timeout --signal=SIGQUIT 2h bin/kola run \
--basename="${NAME}" \
--esx-config-file "${VMWARE_ESX_CREDS}" \
--esx-ova-path tmp/flatcar_production_vmware_ova.ova \
--parallel=4 \
--platform=esx \
--channel="${GROUP}" \
--tapfile="${JOB_NAME##*/}.tap" \
--torcx-manifest=torcx_manifest.json \
${KOLA_TESTS}
set +o noglob
sudo rm -rf tmp
@@ -1,149 +0,0 @@
#!/bin/bash
set -ex
BASE=$(dirname $(readlink -f "$0"))
git -C manifest config user.name "${GIT_AUTHOR_NAME}"
git -C manifest config user.email "${GIT_AUTHOR_EMAIL}"

COREOS_OFFICIAL=0

finish() {
local tag="$1"
git -C manifest tag -v "${tag}"
git -C manifest push "${BUILDS_PUSH_URL}" "refs/tags/${tag}:refs/tags/${tag}"
tee manifest.properties << EOF
MANIFEST_URL = ${BUILDS_CLONE_URL}
MANIFEST_REF = refs/tags/${tag}
MANIFEST_NAME = release.xml
COREOS_OFFICIAL = ${COREOS_OFFICIAL:-0}
EOF
}

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# Branches are of the form remote-name/branch-name. Tags are just tag-name.
# If we have a release tag use it, for branches we need to make a tag.
if [[ "${GIT_BRANCH}" != */* ]]
then
COREOS_OFFICIAL=1
finish "${GIT_BRANCH}"
exit
fi

MANIFEST_BRANCH="${GIT_BRANCH##*/}"
MANIFEST_ID="${MANIFEST_BRANCH}"
# Nightly builds use the "default" manifest from flatcar-master and have the same scripts/overlay/portage branches without a "user/" prefix.
# No further exclusions are made because nothing bad happens if other branches were used.
if [[ "${MANIFEST_NAME}" = default ]] && [[ "${MANIFEST_BRANCH}" = flatcar-master ]] && \
[[ "${SCRIPTS_REF}" = "${OVERLAY_REF}" ]] && [[ "${OVERLAY_REF}" = "${PORTAGE_REF}" ]] && \
[[ "${SCRIPTS_REF}" != */* ]] && [[ "${SCRIPTS_REF}" != "" ]]
then
# Use SCRIPTS_REF but others also work since they have the same value
MANIFEST_ID="${SCRIPTS_REF}-nightly"
fi

MANIFEST_NAME="${MANIFEST_NAME}.xml"
[[ -f "manifest/${MANIFEST_NAME}" ]]

source manifest/version.txt

if [[ "${SDK_VERSION}" == sdk-*-nightly ]]
then
# Get the SDK version from GCS - we use gsutil to get access to the bucket since it's private.
SDK_VERSION=$(docker run --rm -v "${GOOGLE_APPLICATION_CREDENTIALS}:/opt/release.json:ro" google/cloud-sdk:alpine bash -c "gcloud auth activate-service-account --key-file /opt/release.json && gsutil cat gs://flatcar-jenkins/developer/sdk/amd64/${SDK_VERSION}.txt" | tee /dev/stderr)
if [[ -z "${SDK_VERSION}" ]]
then
echo "No SDK found, retrigger the manifest job with default SDK_VERSION and SDK_URL_PATH values."
exit 1
fi
fi

export FLATCAR_BUILD_ID="${BUILD_ID_PREFIX}${MANIFEST_ID}-${BUILD_NUMBER}"
# Nightlies and dev builds have the current date as Flatcar version
if [[ "${MANIFEST_BRANCH}" = flatcar-master ]]
then
FLATCAR_VERSION_ID="$(date '+%Y.%m.%d')"
fi

if [[ "${SDK_VERSION}" = sdk-new ]]
then
# Use the version of the current developer build for DOWNSTREAM=all(-full), requires a seed SDK to be set
# (releases use git tags where all this code here is not executed because the manifest
# and version.txt should not be modified, the Alpha release version.txt has to refer to
# the release to be build for its SDK version)
SDK_VERSION="${FLATCAR_VERSION_ID}+${FLATCAR_BUILD_ID}"
fi

if [[ -n "${SDK_VERSION}" ]]
then
export FLATCAR_SDK_VERSION="${SDK_VERSION}"
fi

# Ensure that each XML tag occupies exactly one line each by first removing all line breaks and then adding
# a line break after each tag.
# This way set_manifest_ref can find the right tag by matching for "/$reponame".
cat manifest/"${MANIFEST_NAME}" | tr '\n' ' ' | sed 's#/>#/>\n#g' > "manifest/${FLATCAR_BUILD_ID}.xml"

set_manifest_ref() {
local reponame="$1"
local reference="$2"
# Select lines with "/$reponame" (kept as first group) and "revision" (kept as second group) and replace the value
# of "revision" (third group, not kept) with the new reference.
sed -i -E "s#(/$reponame.*)(revision=\")([^\"]*)#\1\2$reference#g" "manifest/${FLATCAR_BUILD_ID}.xml"
}

setup_manifest_ref() {
local reponame="${1}"
local ref="${2}"
local full_ref="refs/heads/${ref}"

if [[ -z "${ref//[0-9]}" ]]; then
full_ref="refs/pull/${ref}/head"
fi
set_manifest_ref "${reponame}" "${full_ref}"
"${BASE}/post-github-status.sh" --repo "flatcar-linux/${reponame}" --ref "${full_ref}" --status pending
}

if [[ -n "${SCRIPTS_REF}" ]]
then
setup_manifest_ref scripts "${SCRIPTS_REF}"
fi
if [[ -n "${OVERLAY_REF}" ]]
then
setup_manifest_ref coreos-overlay "${OVERLAY_REF}"
fi
if [[ -n "${PORTAGE_REF}" ]]
then
setup_manifest_ref portage-stable "${PORTAGE_REF}"
fi

ln -fns "${FLATCAR_BUILD_ID}.xml" manifest/default.xml
ln -fns "${FLATCAR_BUILD_ID}.xml" manifest/release.xml

tee manifest/version.txt << EOF
FLATCAR_VERSION=${FLATCAR_VERSION_ID}+${FLATCAR_BUILD_ID}
FLATCAR_VERSION_ID=${FLATCAR_VERSION_ID}
FLATCAR_BUILD_ID=${FLATCAR_BUILD_ID}
FLATCAR_SDK_VERSION=${FLATCAR_SDK_VERSION}
EOF
# Note: You have to keep FLATCAR_VERSION in sync with the value used in the "sdk-new" case.

# Set up GPG for signing tags.
gpg --import "${GPG_SECRET_KEY_FILE}"

# Tag a development build manifest.
git -C manifest add "${FLATCAR_BUILD_ID}.xml" default.xml release.xml version.txt
git -C manifest commit \
-m "${FLATCAR_BUILD_ID}: add build manifest" \
-m "Based on ${GIT_URL} branch ${MANIFEST_BRANCH}" \
-m "${BUILD_URL}"
git -C manifest tag -u "${SIGNING_USER}" -m "${FLATCAR_BUILD_ID}" "${FLATCAR_BUILD_ID}"

finish "${FLATCAR_BUILD_ID}"
@@ -1,120 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# For developer builds that are based on a non-developer release,
# we need the DOWNLOAD_ROOT variable to be the base path, keeping the
# UPLOAD_ROOT variable as the developer path.
if [[ "${RELEASE_BASE_IS_DEV}" = "false" && "${GROUP}" = "developer" && "${RELEASE_BASE}" != "" ]]; then
DOWNLOAD_ROOT=$(echo ${DOWNLOAD_ROOT} | sed 's,/developer,,');
fi
# since /flatcar-jenkins/developer/sdk starts with a / we only use one
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
--verify --verify-signature --replace \
--sdk-url-path "${SDK_URL_PATH}" \
--json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
--manifest-branch "refs/tags/${MANIFEST_TAG}" \
--manifest-name "${MANIFEST_NAME}" \
--manifest-url "${MANIFEST_URL}" \
--sdk-url=storage.googleapis.com

enter() {
local verify_key=
# Run in a subshell to clean some gangue files on exit without
# possibly clobbering the global EXIT trap.
(
trap 'sudo rm -f chroot/etc/portage/gangue.*' EXIT
[ -s verify.asc ] &&
sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
verify_key=--verify-key=/etc/portage/gangue.asc
sudo ln -f "${GOOGLE_APPLICATION_CREDENTIALS}" \
chroot/etc/portage/gangue.json
bin/cork enter --bind-gpg-agent=false -- env \
FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \
FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
{FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
--json-key=/etc/portage/gangue.json $verify_key \
"'"${URI}" "${DISTDIR}/${FILE}"' \
"$@"
)
}

script() {
enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

script update_chroot \
--toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"

script setup_board \
--board="${BOARD}" \
--getbinpkgver=${RELEASE_BASE:-"${FLATCAR_VERSION}" --toolchainpkgonly} \
--skip_chroot_upgrade \
--force

script build_packages \
--board="${BOARD}" \
--getbinpkgver=${RELEASE_BASE:-"${FLATCAR_VERSION}" --toolchainpkgonly} \
--usepkg_exclude="${BINARY_PACKAGES_TO_EXCLUDE}" \
--skip_chroot_upgrade \
--skip_torcx_store \
--sign="${SIGNING_USER}" \
--sign_digests="${SIGNING_USER}" \
--upload_root="${UPLOAD_ROOT}" \
--upload

script build_torcx_store \
--board="${BOARD}" \
--sign="${SIGNING_USER}" \
--sign_digests="${SIGNING_USER}" \
--upload_root="${UPLOAD_ROOT}" \
--torcx_upload_root="${TORCX_PKG_DOWNLOAD_ROOT}" \
--tectonic_torcx_download_root="${TECTONIC_TORCX_DOWNLOAD_ROOT}" \
--upload

if [[ "${GROUP}" = "developer" ]]
then
GROUP="${CHANNEL_BASE}"
fi

# Update entry for latest nightly build reference (there are no symlinks in GCS and it is also good to keep it deterministic)
if [[ "${FLATCAR_BUILD_ID}" == *-*-nightly-* ]]
then
# Extract the nightly name like "flatcar-MAJOR-nightly" from "dev-flatcar-MAJOR-nightly-NUMBER"
NAME=$(echo "${FLATCAR_BUILD_ID}" | grep -o "dev-.*-nightly" | cut -d - -f 2-)
echo "${FLATCAR_VERSION}" | bin/cork enter --bind-gpg-agent=false -- gsutil cp - "${UPLOAD_ROOT}/boards/${BOARD}/${NAME}.txt"
fi
@@ -1,53 +0,0 @@
#!/bin/bash

SHFLAGS=$(dirname $(readlink -f "$0"))/../lib/shflags/shflags
. "${SHFLAGS}" || exit 1

DEFINE_string repo "" "Name of the repository to which to post status"
DEFINE_string ref "" "Reference from which to figure out commit"
DEFINE_string github_token "${GITHUB_TOKEN}" "Github Personal Access Token used to submit the commit status"
DEFINE_string status "pending" "Status to submit for commit. [failure,pending,success,error]"
DEFINE_string context "ci/jenkins" "Context to use for commit status."
DEFINE_boolean verbose "${FLAGS_FALSE}" "Show curl output"

# Parse command line
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"

if [ -z "${FLAGS_repo}" ]; then
echo >&2 "Error: --repo is required"
exit 1
fi
if [ -z "${FLAGS_ref}" ]; then
echo >&2 "Error: --ref is required"
exit 1
fi
if [ -z "${FLAGS_github_token}" ]; then
echo >&2 "Error: --github_token is required"
exit 1
fi

CURLOPTS="-sS"
if [[ "${FLAGS_verbose}" -eq "${FLAGS_true}" ]]; then
CURLOPTS=""
fi

GITHUB_API="https://api.github.com"
# BUILD_URL = JENKINS_URL + JOB_NAME + BUILD_NUMBER
target_url="${BUILD_URL}cldsv"
commit=$(git ls-remote "https://github.com/${FLAGS_repo}" "${FLAGS_ref}"| cut -f1)
if [ -z "${commit}" ]; then
echo >&2 "Can't figure out commit for repo ${FLAGS_repo} ref ${FLAGS_ref}"
exit 2
fi

curl ${CURLOPTS} "${GITHUB_API}/repos/${FLAGS_repo}/statuses/${commit}" \
-H "Content-Type: application/json" \
-H "Authorization: token ${FLAGS_github_token}" \
-X POST -d @- <<EOF
{
"state":"${FLAGS_status}",
"context": "${FLAGS_context}",
"target_url":"${target_url}"
}
EOF
@@ -1,23 +0,0 @@
#!/bin/bash
set -ex

rm -f ami.properties images.json

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/plume pre-release --force \
--debug \
--platform=aws \
--aws-credentials="${AWS_CREDENTIALS}" \
--gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--board="${BOARD}" \
--channel="${CHANNEL}" \
--version="${FLATCAR_VERSION}" \
--write-image-list=images.json \
$verify_key

hvm_ami_id=$(jq -r '.aws.amis[]|select(.name == "'"${AWS_REGION}"'").hvm' images.json)

tee ami.properties << EOF
HVM_AMI_ID = ${hvm_ami_id:?}
EOF
@@ -1,33 +0,0 @@
#!/bin/bash
set -ex

AZURE_CATEGORY_OPT=""
if [[ "${IS_NON_SPONSORED}" == true ]]
then
AZURE_CATEGORY_OPT="--azure-category=pro"
fi

rm -f images.json

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

bin/plume pre-release --force \
--debug \
--platform=azure \
--azure-profile="${AZURE_CREDENTIALS}" \
--azure-auth="${AZURE_AUTH_CREDENTIALS}" \
--gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--board="${BOARD}" \
--channel="${CHANNEL}" \
--version="${FLATCAR_VERSION}" \
--write-image-list=images.json \
${AZURE_CATEGORY_OPT} \
$verify_key

sas_url=$(jq -r '.azure.image' images.json)
if [ "${sas_url}" = "null" ]; then
sas_url=""
fi
tee test.properties << EOF
SAS_URL ^ ${sas_url:?}
EOF
@@ -1,21 +0,0 @@
#!/bin/bash
set -ex
case "${CHANNEL}" in
*)
boards=( amd64-usr arm64-usr )
;;
esac

for board in "${boards[@]}"
do
bin/plume release \
--debug \
--aws-credentials="${AWS_CREDENTIALS}" \
--azure-profile="${AZURE_CREDENTIALS}" \
--azure-auth="${AZURE_AUTH_CREDENTIALS}" \
--gce-json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--gce-release-key="${GOOGLE_RELEASE_CREDENTIALS}" \
--board="${board}" \
--channel="${CHANNEL}" \
--version="${VERSION}"
done
@@ -1,94 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Catalyst leaves things chowned as root.
[ -d .cache/sdks ] && sudo chown -R "$USER" .cache/sdks

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

if [[ "${SEED_SDK_VERSION}" == alpha ]]
then
SEED_SDK_VERSION=$(curl -s -S -f -L "https://alpha.release.flatcar-linux.net/amd64-usr/current/version.txt" | grep -m 1 FLATCAR_SDK_VERSION= | cut -d = -f 2- | tee /dev/stderr)
if [[ -z "${SEED_SDK_VERSION}" ]]
then
echo "Unexpected: Alpha release SDK version not found"
exit 1
fi
fi

DOWNLOAD_ROOT=${DOWNLOAD_ROOT:-"gs://flatcar-jenkins"}
# The seed SDK is always a release SDK
DOWNLOAD_ROOT_SDK="gs://flatcar-jenkins/sdk"

# We do not use a nightly SDK as seed for bootstrapping because the next major Alpha SDK release would also have to use the last published Alpha release SDK as seed.
# Also, we don't want compiler bugs to propagate from one nightly SDK to the next even though the commit in question was reverted.
# Having a clear bootstrap path is our last safety line before insanity for that kind of bugs, and is a requirement for reproducibility and security.
# Fore more info, read Ken Thompson's Turing Award Lecture "Reflections on Trusting Trust".
# In rare cases this will mean that a huge compiler update has to be split because first a released SDK with a newer compiler is needed to compile an even newer compiler
# (or linker, libc etc). For experiments one can download the nightly/developer SDK and start the bootstrap from it locally but exposing this functionality in Jenkins would
# cause more confusion than helping to understand what the requirements are to get SDK changes to a releasable state.

bin/cork update \
--create --downgrade-replace --verify --verify-signature --verbose \
--sdk-version "${SEED_SDK_VERSION}" \
--force-sync \
--json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
--manifest-branch "refs/tags/${MANIFEST_TAG}" \
--sdk-url storage.googleapis.com \
--sdk-url-path "/flatcar-jenkins/sdk" \
--manifest-name "${MANIFEST_NAME}" \
--manifest-url "${MANIFEST_URL}" -- --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}" --setuponly

if [[ ${FULL_BUILD} == "false" ]]; then
export FORCE_STAGES="stage4"
fi

enter() {
# we add the public key to verify the signature with gangue
sudo ln -f ./verify.asc chroot/opt/verify.asc
# GCP service account to get access to private bucket during the gangue downloading
sudo ln -f "${GOOGLE_APPLICATION_CREDENTIALS}" chroot/etc/portage/gangue.json
bin/cork enter --bind-gpg-agent=false -- "$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

# Wipe all of catalyst.
sudo rm -rf src/build

# Fetch DIGEST to prevent re-downloading the same SDK tarball
enter /mnt/host/source/bin/gangue get --verify-key /opt/verify.asc --json-key /etc/portage/gangue.json "${DOWNLOAD_ROOT_SDK}/amd64/${FLATCAR_SDK_VERSION}/flatcar-sdk-amd64-${FLATCAR_SDK_VERSION}.tar.bz2.DIGESTS" /mnt/host/source/.cache/sdks/

enter sudo \
FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
FORCE_STAGES="${FORCE_STAGES}" \
/mnt/host/source/src/scripts/bootstrap_sdk \
--sign="${SIGNING_USER}" \
--sign_digests="${SIGNING_USER}" \
--upload_root="${UPLOAD_ROOT}" \
--stage1_overlay_ref="${STAGE1_OVERLAY_REF}" \
--stage1_portage_ref="${STAGE1_PORTAGE_REF}" \
--upload

# Update entry for latest nightly build reference (there are no symlinks in GCS and it is also good to keep it deterministic)
if [[ "${FLATCAR_BUILD_ID}" == *-*-nightly-* ]]
then
# Extract the nightly name like "flatcar-MAJOR-nightly" from "dev-flatcar-MAJOR-nightly-NUMBER"
NAME=$(echo "${FLATCAR_BUILD_ID}" | grep -o "dev-.*-nightly" | cut -d - -f 2-)
echo "${FLATCAR_VERSION}" | enter gsutil cp - "${UPLOAD_ROOT}/sdk/amd64/sdk-${NAME}.txt"
fi
@@ -1,84 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Catalyst leaves things chowned as root.
[ -d .cache/sdks ] && sudo chown -R "$USER" .cache/sdks

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

DOWNLOAD_ROOT=${DOWNLOAD_ROOT:-"${UPLOAD_ROOT}"}
# since /flatcar-jenkins/developer/sdk starts with a / we only use one
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
--verify --verify-signature --replace \
--sdk-url-path "${SDK_URL_PATH}" \
--json-key "${GOOGLE_APPLICATION_CREDENTIALS}" \
--sdk-url storage.googleapis.com \
${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
--manifest-branch "refs/tags/${MANIFEST_TAG}" \
--manifest-name "${MANIFEST_NAME}" \
--manifest-url "${MANIFEST_URL}"

enter() {
sudo ln -f "${GOOGLE_APPLICATION_CREDENTIALS}" \
chroot/etc/portage/gangue.json
# we add the public key to verify the signature with gangue
sudo ln -f ./verify.asc chroot/opt/verify.asc
bin/cork enter --bind-gpg-agent=false -- env \
FLATCAR_DEV_BUILDS="${DOWNLOAD_ROOT}" \
FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
{FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
--json-key=/etc/portage/gangue.json $verify_key \
"'"${URI}" "${DISTDIR}/${FILE}"' \
"$@"
}

script() {
enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

# Fetch DIGEST to prevent re-downloading the same SDK tarball
enter /mnt/host/source/bin/gangue get --verify-key /opt/verify.asc --json-key /etc/portage/gangue.json "${DOWNLOAD_ROOT_SDK}/amd64/${FLATCAR_SDK_VERSION}/flatcar-sdk-amd64-${FLATCAR_SDK_VERSION}.tar.bz2.DIGESTS" /mnt/host/source/.cache/sdks/

script update_chroot \
--toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}" --setuponly

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

# Wipe all of catalyst.
sudo rm -rf src/build

enter sudo FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" /mnt/host/source/src/scripts/build_toolchains \
--sign="${SIGNING_USER}" \
--sign_digests="${SIGNING_USER}" \
--upload_root="${UPLOAD_ROOT}" \
--upload
jenkins/vms.sh
@@ -1,126 +0,0 @@
#!/bin/bash
set -ex

# The build may not be started without a tag value.
[ -n "${MANIFEST_TAG}" ]

# Set up GPG for verifying tags.
export GNUPGHOME="${PWD}/.gnupg"
rm -rf "${GNUPGHOME}"
trap 'rm -rf "${GNUPGHOME}"' EXIT
mkdir --mode=0700 "${GNUPGHOME}"
gpg --import verify.asc
# Sometimes this directory is not created automatically making further private
# key imports fail, let's create it here as a workaround
mkdir -p --mode=0700 "${GNUPGHOME}/private-keys-v1.d/"

# since /flatcar-jenkins/developer/sdk starts with a / we only use one
DOWNLOAD_ROOT_SDK="gs:/${SDK_URL_PATH}"

SCRIPTS_PATCH_ARG=""
OVERLAY_PATCH_ARG=""
PORTAGE_PATCH_ARG=""
if [ "$(cat scripts.patch | wc -l)" != 0 ]; then
SCRIPTS_PATCH_ARG="--scripts-patch scripts.patch"
fi
if [ "$(cat overlay.patch | wc -l)" != 0 ]; then
OVERLAY_PATCH_ARG="--overlay-patch overlay.patch"
fi
if [ "$(cat portage.patch | wc -l)" != 0 ]; then
PORTAGE_PATCH_ARG="--portage-patch portage.patch"
fi

bin/cork create \
--replace --verify --verify-signature --verbose \
--sdk-url-path "${SDK_URL_PATH}" \
--json-key "${GS_DEVEL_CREDS}" \
${SCRIPTS_PATCH_ARG} ${OVERLAY_PATCH_ARG} ${PORTAGE_PATCH_ARG} \
--manifest-branch "refs/tags/${MANIFEST_TAG}" \
--manifest-name "${MANIFEST_NAME}" \
--manifest-url "${MANIFEST_URL}" \
--sdk-url=storage.googleapis.com

# Clear out old images.
sudo rm -rf chroot/build tmp

enter() {
local verify_key=
# Run in a subshell to clean some gangue files on exit without
# possibly clobbering the global EXIT trap.
(
trap 'sudo rm -f chroot/etc/portage/gangue.*' EXIT
[ -s verify.asc ] &&
sudo ln -f verify.asc chroot/etc/portage/gangue.asc &&
verify_key=--verify-key=/etc/portage/gangue.asc
sudo ln -f "${GS_DEVEL_CREDS}" chroot/etc/portage/gangue.json
bin/cork enter --bind-gpg-agent=false -- env \
FLATCAR_DEV_BUILDS="${GS_DEVEL_ROOT}" \
FLATCAR_DEV_BUILDS_SDK="${DOWNLOAD_ROOT_SDK}" \
{FETCH,RESUME}COMMAND_GS="/mnt/host/source/bin/gangue get \
--json-key=/etc/portage/gangue.json $verify_key \
"'"${URI}" "${DISTDIR}/${FILE}"' \
"$@"
)
}

script() {
enter "/mnt/host/source/src/scripts/$@"
}

source .repo/manifests/version.txt
export FLATCAR_BUILD_ID

script update_chroot \
--toolchain_boards="${BOARD}" --dev_builds_sdk="${DOWNLOAD_ROOT_SDK}"

# Set up GPG for signing uploads.
gpg --import "${GPG_SECRET_KEY_FILE}"

[ -s verify.asc ] && verify_key=--verify-key=verify.asc || verify_key=

mkdir -p src tmp
bin/cork download-image \
--root="${UPLOAD_ROOT}/boards/${BOARD}/${FLATCAR_VERSION}" \
--json-key="${GOOGLE_APPLICATION_CREDENTIALS}" \
--cache-dir=./src \
--platform=qemu \
--verify=true $verify_key

img=src/flatcar_production_image.bin
[[ "${img}.bz2" -nt "${img}" ]] &&
enter lbunzip2 -k -f "/mnt/host/source/${img}.bz2"

if [[ "${FORMATS}" = "" ]]
then
FORMATS="${FORMAT}"
fi

if [[ "${FORMATS}" == *"azure_gen2"* ]] ; then
# azure_gen2 shares an image with azure
if [[ " ${FORMATS} " != *" azure "* ]]; then
FORMATS+=" azure"
fi
FORMATS=${FORMATS/azure_gen2/}
fi

for FORMAT in ${FORMATS}; do
COMPRESSION_FORMAT="bz2"

if [[ "${FORMAT}" =~ ^(openstack|openstack_mini|digitalocean)$ ]];then
COMPRESSION_FORMAT="gz,bz2"
fi

script image_to_vm.sh \
--board="${BOARD}" \
--format="${FORMAT}" \
--getbinpkg \
--getbinpkgver="${FLATCAR_VERSION}" \
--from=/mnt/host/source/src \
--to=/mnt/host/source/tmp \
--sign="${SIGNING_USER}" \
--sign_digests="${SIGNING_USER}" \
--download_root="${DOWNLOAD_ROOT}" \
--upload_root="${UPLOAD_ROOT}" \
--image_compression_formats="${COMPRESSION_FORMAT}" \
--upload
done