From 20ed1ad3a43e45e26f9e4b5b156e1497b6b2d442 Mon Sep 17 00:00:00 2001
From: Kai Lueke
Date: Thu, 15 Sep 2022 12:36:22 +0200
Subject: [PATCH 1/7] ci-automation/release.sh: Run plume to release cloud
 images

The mantle plume tool has two steps: pre-release is the mere upload, and
release is the publication. In the past the two steps were run separately so
that the tests could run in between, but we don't do this anymore, so run
plume pre-release and release in a single job. Since plume can't push to GCS
in our case, we upload the files to bincache.
Also do the CloudFormation update which was previously done in
flatcar-build-scripts but could only be run after the sync to Origin. It
requires the "aws" tool in the mantle container until we implement this in
plume directly.
---
 ci-automation/release.sh | 249 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 242 insertions(+), 7 deletions(-)

diff --git a/ci-automation/release.sh b/ci-automation/release.sh
index 37003bf129..d6c17fdae4 100644
--- a/ci-automation/release.sh
+++ b/ci-automation/release.sh
@@ -62,17 +62,74 @@ function _inside_mantle() {
     (
     set -euo pipefail
+    source sdk_lib/sdk_container_common.sh
     source ci-automation/ci_automation_common.sh
     source sdk_container/.repo/manifests/version.txt
+    source sdk_container/.env
+    CHANNEL="$(get_git_channel)"
+    VERSION="${FLATCAR_VERSION}"
+    azure_profile_config_file=""
+    secret_to_file azure_profile_config_file "${AZURE_PROFILE}"
+    azure_auth_config_file=""
+    secret_to_file azure_auth_config_file "${AZURE_AUTH_CREDENTIALS}"
+    aws_credentials_config_file=""
+    secret_to_file aws_credentials_config_file "${AWS_CREDENTIALS}"
+    aws_marketplace_credentials_file=""
+    secret_to_file aws_marketplace_credentials_file "${AWS_MARKETPLACE_CREDENTIALS}"
+    gcp_json_key_path=""
+    secret_to_file gcp_json_key_path "${GCP_JSON_KEY}"
+    google_release_credentials_file=""
+    secret_to_file google_release_credentials_file "${GOOGLE_RELEASE_CREDENTIALS}"
-    # TODO: set up credentials
-    # TODO: run mantle pre-release and release for all platforms
-    #       (needs changes in mantle to consume from buildcache via https)
-    # TODO: run ore for AWS marketplace upload
+    for platform in aws azure; do
+        for arch in amd64 arm64; do
+            # Create a folder where plume stores flatcar_production_ami_*txt and flatcar_production_ami_*json
+            # for later push to bincache
+            rm -rf "${platform}-${arch}"
+            mkdir "${platform}-${arch}"
+            cd "${platform}-${arch}"
+            plume pre-release --force \
+                --debug \
+                --platform="${platform}" \
+                --aws-credentials="${aws_credentials_config_file}" \
+                --azure-profile="${azure_profile_config_file}" \
+                --azure-auth="${azure_auth_config_file}" \
+                --board="${arch}-usr" \
+                --channel="${CHANNEL}" \
+                --version="${FLATCAR_VERSION}" \
+                --write-image-list="images.json"
+            plume release \
+                --debug \
+                --aws-credentials="${aws_credentials_config_file}" \
+                --aws-marketplace-credentials="${aws_marketplace_credentials_file}" \
+                --publish-marketplace \
+                --access-role-arn="${AWS_MARKETPLACE_ARN}" \
+                --azure-profile="${azure_profile_config_file}" \
+                --azure-auth="${azure_auth_config_file}" \
+                --gce-json-key="${gcp_json_key_path}" \
+                --gce-release-key="${google_release_credentials_file}" \
+                --board="${arch}-usr" \
+                --channel="${CHANNEL}" \
+                --version="${VERSION}"
+            cd ..
+ done + done + + # Future: move this to "plume release", in the past this was done in "update-cloudformation-template" + aws_cloudformation_credentials_file="" + secret_to_file aws_cloudformation_credentials_file "${AWS_CLOUDFORMATION_CREDENTIALS}" + export AWS_SHARED_CREDENTIALS_FILE="${aws_cloudformation_credentials_file}" + rm -rf cloudformation-files + mkdir cloudformation-files + for arch in amd64 arm64; do + generate_templates "aws-${arch}/flatcar_production_ami_all.json" "${arch}-usr" + done + aws s3 cp --recursive --acl public-read cloudformation-files/ s3://flatcar-prod-ami-import-eu-central-1/dist/aws/ ) } function _release_build_impl() { + source sdk_lib/sdk_container_common.sh source ci-automation/ci_automation_common.sh source ci-automation/gpg_setup.sh init_submodules @@ -92,9 +149,14 @@ function _release_build_impl() { touch sdk_container/.env # This file should already contain the required credentials as env vars docker run --pull always --rm --name="${container_name}" --net host \ -w /work -v "$PWD":/work "${mantle_ref}" bash -c "source ci-automation/release.sh; _inside_mantle" - # TODO: sign and copy resulting AMI text file to buildcache - # TODO: run CF template update - # TODO: publish SDK container image if not published yet (i.e., on new majors) + # Push flatcar_production_ami_*txt and flatcar_production_ami_*json to the right bincache folder + for arch in amd64 arm64; do + sudo chown -R "$USER:$USER" "aws-${arch}" + create_digests "${SIGNER}" "aws-${arch}/flatcar_production_ami_"*txt "aws-${arch}/flatcar_production_ami_"*json + sign_artifacts "${SIGNER}" "aws-${arch}/flatcar_production_ami_"*txt "aws-${arch}/flatcar_production_ami_"*json + copy_to_buildcache "images/${arch}/${vernum}/" "aws-${arch}/flatcar_production_ami_"*txt* "aws-${arch}/flatcar_production_ami_"*json* + done + # TODO: publish SDK container image if SDK version is the same as the image version (e.g., on new major versions) echo "====" echo "Done, now you can copy the images to Origin" echo "====" @@ -105,3 +167,176 @@ function _release_build_impl() { # Future: trigger push to nebraska # Future: trigger Origin symlink switch } + +TEMPLATE=' +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Flatcar Linux on EC2: https://kinvolk.io/docs/flatcar-container-linux/latest/installing/cloud/aws-ec2/", + "Mappings" : { + "RegionMap" : { +###AMIS### + } + }, + "Parameters": { + "InstanceType" : { + "Description" : "EC2 HVM instance type (m3.medium, etc).", + "Type" : "String", + "Default" : "m3.medium", + "ConstraintDescription" : "Must be a valid EC2 HVM instance type." + }, + "ClusterSize": { + "Default": "3", + "MinValue": "3", + "MaxValue": "12", + "Description": "Number of nodes in cluster (3-12).", + "Type": "Number" + }, + "DiscoveryURL": { + "Description": "An unique etcd cluster discovery URL. 
Grab a new token from https://discovery.etcd.io/new?size=", + "Type": "String" + }, + "AdvertisedIPAddress": { + "Description": "Use 'private' if your etcd cluster is within one region or 'public' if it spans regions or cloud providers.", + "Default": "private", + "AllowedValues": ["private", "public"], + "Type": "String" + }, + "AllowSSHFrom": { + "Description": "The net block (CIDR) that SSH is available to.", + "Default": "0.0.0.0/0", + "Type": "String" + }, + "KeyPair" : { + "Description" : "The name of an EC2 Key Pair to allow SSH access to the instance.", + "Type" : "String" + } + }, + "Resources": { + "FlatcarSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Flatcar Linux SecurityGroup", + "SecurityGroupIngress": [ + {"IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": {"Ref": "AllowSSHFrom"}} + ] + } + }, + "Ingress4001": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupName": {"Ref": "FlatcarSecurityGroup"}, "IpProtocol": "tcp", "FromPort": "4001", "ToPort": "4001", "SourceSecurityGroupId": { + "Fn::GetAtt" : [ "FlatcarSecurityGroup", "GroupId" ] + } + } + }, + "Ingress2379": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupName": {"Ref": "FlatcarSecurityGroup"}, "IpProtocol": "tcp", "FromPort": "2379", "ToPort": "2379", "SourceSecurityGroupId": { + "Fn::GetAtt" : [ "FlatcarSecurityGroup", "GroupId" ] + } + } + }, + "Ingress2380": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupName": {"Ref": "FlatcarSecurityGroup"}, "IpProtocol": "tcp", "FromPort": "2380", "ToPort": "2380", "SourceSecurityGroupId": { + "Fn::GetAtt" : [ "FlatcarSecurityGroup", "GroupId" ] + } + } + }, + "FlatcarServerAutoScale": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AvailabilityZones": {"Fn::GetAZs": ""}, + "LaunchConfigurationName": {"Ref": "FlatcarServerLaunchConfig"}, + "MinSize": "3", + "MaxSize": "12", + "DesiredCapacity": {"Ref": "ClusterSize"}, + "Tags": [ + {"Key": "Name", "Value": { "Ref" : "AWS::StackName" }, "PropagateAtLaunch": true} + ] + } + }, + "FlatcarServerLaunchConfig": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "ImageId" : { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "AMI" ]}, + "InstanceType": {"Ref": "InstanceType"}, + "KeyName": {"Ref": "KeyPair"}, + "SecurityGroups": [{"Ref": "FlatcarSecurityGroup"}], + "UserData" : { "Fn::Base64": + { "Fn::Join": [ "", [ + "#cloud-config\n\n", + "coreos:\n", + " etcd2:\n", + " discovery: ", { "Ref": "DiscoveryURL" }, "\n", + " advertise-client-urls: http://$", { "Ref": "AdvertisedIPAddress" }, "_ipv4:2379\n", + " initial-advertise-peer-urls: http://$", { "Ref": "AdvertisedIPAddress" }, "_ipv4:2380\n", + " listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001\n", + " listen-peer-urls: http://$", { "Ref": "AdvertisedIPAddress" }, "_ipv4:2380\n", + " units:\n", + " - name: etcd2.service\n", + " command: start\n", + " - name: fleet.service\n", + " command: start\n" + ] ] + } + } + } + } + } +} +' +function generate_templates() { + CHANNEL="$1" + BOARD="$2" + + local REGIONS=("eu-central-1" + "ap-northeast-1" + "ap-northeast-2" + # "ap-northeast-3" # Disabled for now because we do not have access + "af-south-1" + "ca-central-1" + "ap-south-1" + "sa-east-1" + "ap-southeast-1" + "ap-southeast-2" + "ap-southeast-3" + "us-east-1" + "us-east-2" + "us-west-2" + "us-west-1" + "eu-west-1" + "eu-west-2" + "eu-west-3" + "eu-north-1" + "eu-south-1" + 
"ap-east-1" + "me-south-1") + + if [ "${BOARD}" = "amd64-usr" ]; then + ARCHTAG="" + elif [ "${BOARD}" = "arm64-usr" ]; then + ARCHTAG="-arm64" + else + echo "No architecture tag defined for board \"${BOARD}\"" + exit 1 + fi + + TMPFILE=$(mktemp) + + >${TMPFILE} + for region in "${REGIONS[@]}"; do + echo " \"${region}\" : {" >> ${TMPFILE} + echo -n ' "AMI" : ' >> ${TMPFILE} + cat "${CHANNEL}".json | jq ".[] | map(select(.name == \"${region}\")) | .[0] | .\"hvm\"" >> ${TMPFILE} + echo " }," >> ${TMPFILE} + done + + truncate -s-2 ${TMPFILE} + + echo "${TEMPLATE}" | perl -i -0pe "s/###AMIS###/$(cat -- ${TMPFILE})/g" > "cloudformation-files/flatcar-${CHANNEL}${ARCHTAG}-hvm.template" + + rm "${TMPFILE}" +} From 27b62deb81cb6a157d22ce067f860e5b07e5360c Mon Sep 17 00:00:00 2001 From: Mathieu Tortuyaux Date: Wed, 31 Aug 2022 16:59:47 +0200 Subject: [PATCH 2/7] sdk_container: publish the SDK on a Docker registry Signed-off-by: Mathieu Tortuyaux --- ci-automation/release.sh | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/ci-automation/release.sh b/ci-automation/release.sh index d6c17fdae4..6fba16fddb 100644 --- a/ci-automation/release.sh +++ b/ci-automation/release.sh @@ -38,6 +38,16 @@ # Defaults to nothing if not set - in such case, artifacts will not be signed. # If provided, SIGNER environment variable should also be provided, otherwise this environment variable will be ignored. # +# 3. REGISTRY_USERNAME. Environment variable. The username to use for Docker registry login. +# Defaults to nothing if not set - in such case, SDK container will not be pushed. +# +# 4. REGISTRY_PASSWORD. Environment variable. The password to use for Docker registry login. +# Defaults to nothing if not set - in such case, SDK container will not be pushed. +# +# 5. Cloud credentials as secrets via the environment variables AZURE_PROFILE, AZURE_AUTH_CREDENTIALS, +# AWS_CREDENTIALS, AWS_MARKETPLACE_CREDENTIALS, AWS_MARKETPLACE_ARN, AWS_CLOUDFORMATION_CREDENTIALS, +# GCP_JSON_KEY, GOOGLE_RELEASE_CREDENTIALS. +# # OUTPUT: # # 1. The cloud images are published with mantle's plume and ore tools @@ -124,10 +134,34 @@ function _inside_mantle() { for arch in amd64 arm64; do generate_templates "aws-${arch}/flatcar_production_ami_all.json" "${arch}-usr" done - aws s3 cp --recursive --acl public-read cloudformation-files/ s3://flatcar-prod-ami-import-eu-central-1/dist/aws/ + aws s3 cp --recursive --acl public-read cloudformation-files/ "s3://flatcar-prod-ami-import-eu-central-1/dist/aws/" ) } +function publish_sdk() { + local docker_sdk_vernum="$1" + local sdk_name="" + local image_name="" + + # If the registry password or the registry username is not set, we leave early. + [[ -z "${REGISTRY_PASSWORD}" ]] || [[ -z "${REGISTRY_USERNAME}" ]] && return + + ( + # Don't print the password to stderr when logging in + set +x + local container_registry="" + container_registry=$(echo "${sdk_container_common_registry}" | cut -d / -f 1) + echo "${REGISTRY_PASSWORD}" | docker login "${container_registry}" -u "${REGISTRY_USERNAME}" --password-stdin + ) + + # Docker images are pushed in the container registry. 
+ for a in all amd64 arm64; do + sdk_name="flatcar-sdk-${a}" + docker_image_from_registry_or_buildcache "${sdk_name}" "${docker_sdk_vernum}" + docker push "${sdk_container_common_registry}/flatcar-sdk-${a}":"${docker_sdk_vernum}" + done +} + function _release_build_impl() { source sdk_lib/sdk_container_common.sh source ci-automation/ci_automation_common.sh @@ -156,7 +190,7 @@ function _release_build_impl() { sign_artifacts "${SIGNER}" "aws-${arch}/flatcar_production_ami_"*txt "aws-${arch}/flatcar_production_ami_"*json copy_to_buildcache "images/${arch}/${vernum}/" "aws-${arch}/flatcar_production_ami_"*txt* "aws-${arch}/flatcar_production_ami_"*json* done - # TODO: publish SDK container image if SDK version is the same as the image version (e.g., on new major versions) + publish_sdk "${docker_sdk_vernum}" echo "====" echo "Done, now you can copy the images to Origin" echo "====" From 593cf19a7a95ac7f1c7a2d082727f98738837f7d Mon Sep 17 00:00:00 2001 From: Mathieu Tortuyaux Date: Mon, 19 Sep 2022 16:11:41 +0200 Subject: [PATCH 3/7] release: get product IDs from Jenkins the JSON object is passed from the Groovy script to the release script, we just need to extract the correct AWS Marketplace product ID based on the "-". Exception for the stable-amd64 where we also need to get the stable-pro product ID. Signed-off-by: Mathieu Tortuyaux --- ci-automation/release.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ci-automation/release.sh b/ci-automation/release.sh index 6fba16fddb..51bcf3fd99 100644 --- a/ci-automation/release.sh +++ b/ci-automation/release.sh @@ -98,6 +98,14 @@ function _inside_mantle() { rm -rf "${platform}-${arch}" mkdir "${platform}-${arch}" cd "${platform}-${arch}" + + export product="${CHANNEL}-${arch}" + pid=$(jq -r ".[env.product]" ../product-ids.json) + + # If the channel is 'stable' and the arch 'amd64', we add the stable-pro-amd64 product ID to the product IDs. + # The published AMI ID is the same for both offer. + [[ "${CHANNEL}" == "stable" ]] && [[ "${arch}" == "amd64" ]] && pid="${pid},$(jq -r '.["stable-pro-amd64"]' ../product-ids.json)" + plume pre-release --force \ --debug \ --platform="${platform}" \ @@ -114,6 +122,7 @@ function _inside_mantle() { --aws-marketplace-credentials="${aws_marketplace_credentials_file}" \ --publish-marketplace \ --access-role-arn="${AWS_MARKETPLACE_ARN}" \ + --product-ids="${pid}" \ --azure-profile="${azure_profile_config_file}" \ --azure-auth="${azure_auth_config_file}" \ --gce-json-key="${gcp_json_key_path}" \ From ef8f20f9dd4b2b6848c91fb769f9a899cc8f3106 Mon Sep 17 00:00:00 2001 From: Kai Lueke Date: Tue, 20 Sep 2022 14:14:08 +0200 Subject: [PATCH 4/7] ci-automation/release: Disable GCS auth for plume pre-release When GCS auth is expected, plume would upload the AMI list to GCS. --- ci-automation/release.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ci-automation/release.sh b/ci-automation/release.sh index 51bcf3fd99..6ccd127d90 100644 --- a/ci-automation/release.sh +++ b/ci-automation/release.sh @@ -75,6 +75,7 @@ function _inside_mantle() { source sdk_lib/sdk_container_common.sh source ci-automation/ci_automation_common.sh source sdk_container/.repo/manifests/version.txt + # Needed because we are not the SDK container here source sdk_container/.env CHANNEL="$(get_git_channel)" VERSION="${FLATCAR_VERSION}" @@ -106,12 +107,17 @@ function _inside_mantle() { # The published AMI ID is the same for both offer. 
[[ "${CHANNEL}" == "stable" ]] && [[ "${arch}" == "amd64" ]] && pid="${pid},$(jq -r '.["stable-pro-amd64"]' ../product-ids.json)" + # For pre-release we don't use the Google Cloud token because it's not needed + # and we don't want to upload the AMIs to GCS anymore + # (change https://github.com/flatcar/mantle/blob/bc6bc232677c45e389feb221da295cc674882f8c/cmd/plume/prerelease.go#L663-L667 + # if you want to add GCP release code in plume pre-release instead of plume release) plume pre-release --force \ --debug \ --platform="${platform}" \ --aws-credentials="${aws_credentials_config_file}" \ --azure-profile="${azure_profile_config_file}" \ --azure-auth="${azure_auth_config_file}" \ + --gce-json-key=none \ --board="${arch}-usr" \ --channel="${CHANNEL}" \ --version="${FLATCAR_VERSION}" \ From 79d89faf918abb1295d635d8edc09b1ecedf423d Mon Sep 17 00:00:00 2001 From: Kai Lueke Date: Tue, 20 Sep 2022 17:06:15 +0200 Subject: [PATCH 5/7] ci-automation/secret_to_file: Fix usage from subshell This failed when used from ( secret_to_file ... VAR ; cat $VAR ) because ( ) starts a new subshell PID and secret_to_file's returned /proc/PID/fd/X path was then using the wrong PID. --- ci-automation/ci_automation_common.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci-automation/ci_automation_common.sh b/ci-automation/ci_automation_common.sh index d1db1aa8f6..488b4d6a1c 100644 --- a/ci-automation/ci_automation_common.sh +++ b/ci-automation/ci_automation_common.sh @@ -310,7 +310,8 @@ function secret_to_file() { exec {fd}<>"${tmpfile}" rm -f "${tmpfile}" echo "${secret}" | base64 --decode >&${fd} - config_ref="/proc/${$}/fd/${fd}" + # Use BASHPID because we may be in a subshell but $$ is only the main shell's PID + config_ref="/proc/${BASHPID}/fd/${fd}" } # -- From ffee812d322afa0ddb236954e4e6e33faf961e87 Mon Sep 17 00:00:00 2001 From: Kai Lueke Date: Thu, 22 Sep 2022 13:14:35 +0200 Subject: [PATCH 6/7] ci-automation/release: Run plume release only once We need to run plume only once for each arch, move it out of the loop. Also, address some smaller things that shellcheck complains about. --- ci-automation/release.sh | 74 +++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 31 deletions(-) diff --git a/ci-automation/release.sh b/ci-automation/release.sh index 6ccd127d90..c4453d8655 100644 --- a/ci-automation/release.sh +++ b/ci-automation/release.sh @@ -98,14 +98,7 @@ function _inside_mantle() { # for later push to bincache rm -rf "${platform}-${arch}" mkdir "${platform}-${arch}" - cd "${platform}-${arch}" - - export product="${CHANNEL}-${arch}" - pid=$(jq -r ".[env.product]" ../product-ids.json) - - # If the channel is 'stable' and the arch 'amd64', we add the stable-pro-amd64 product ID to the product IDs. - # The published AMI ID is the same for both offer. 
- [[ "${CHANNEL}" == "stable" ]] && [[ "${arch}" == "amd64" ]] && pid="${pid},$(jq -r '.["stable-pro-amd64"]' ../product-ids.json)" + pushd "${platform}-${arch}" # For pre-release we don't use the Google Cloud token because it's not needed # and we don't want to upload the AMIs to GCS anymore @@ -122,23 +115,38 @@ function _inside_mantle() { --channel="${CHANNEL}" \ --version="${FLATCAR_VERSION}" \ --write-image-list="images.json" - plume release \ - --debug \ - --aws-credentials="${aws_credentials_config_file}" \ - --aws-marketplace-credentials="${aws_marketplace_credentials_file}" \ - --publish-marketplace \ - --access-role-arn="${AWS_MARKETPLACE_ARN}" \ - --product-ids="${pid}" \ - --azure-profile="${azure_profile_config_file}" \ - --azure-auth="${azure_auth_config_file}" \ - --gce-json-key="${gcp_json_key_path}" \ - --gce-release-key="${google_release_credentials_file}" \ - --board="${arch}-usr" \ - --channel="${CHANNEL}" \ - --version="${VERSION}" - cd .. + popd done done + for arch in amd64 arm64; do + # Create a folder where plume stores any temporarily downloaded files + rm -rf "release-${arch}" + mkdir "release-${arch}" + pushd "release-${arch}" + + export product="${CHANNEL}-${arch}" + pid=$(jq -r ".[env.product]" ../product-ids.json) + + # If the channel is 'stable' and the arch 'amd64', we add the stable-pro-amd64 product ID to the product IDs. + # The published AMI ID is the same for both offer. + [[ "${CHANNEL}" == "stable" ]] && [[ "${arch}" == "amd64" ]] && pid="${pid},$(jq -r '.["stable-pro-amd64"]' ../product-ids.json)" + + plume release \ + --debug \ + --aws-credentials="${aws_credentials_config_file}" \ + --aws-marketplace-credentials="${aws_marketplace_credentials_file}" \ + --publish-marketplace \ + --access-role-arn="${AWS_MARKETPLACE_ARN}" \ + --product-ids="${pid}" \ + --azure-profile="${azure_profile_config_file}" \ + --azure-auth="${azure_auth_config_file}" \ + --gce-json-key="${gcp_json_key_path}" \ + --gce-release-key="${google_release_credentials_file}" \ + --board="${arch}-usr" \ + --channel="${CHANNEL}" \ + --version="${VERSION}" + popd + done # Future: move this to "plume release", in the past this was done in "update-cloudformation-template" aws_cloudformation_credentials_file="" @@ -147,7 +155,7 @@ function _inside_mantle() { rm -rf cloudformation-files mkdir cloudformation-files for arch in amd64 arm64; do - generate_templates "aws-${arch}/flatcar_production_ami_all.json" "${arch}-usr" + generate_templates "aws-${arch}/flatcar_production_ami_all.json" "${CHANNEL}" "${arch}-usr" done aws s3 cp --recursive --acl public-read cloudformation-files/ "s3://flatcar-prod-ami-import-eu-central-1/dist/aws/" ) @@ -156,7 +164,6 @@ function _inside_mantle() { function publish_sdk() { local docker_sdk_vernum="$1" local sdk_name="" - local image_name="" # If the registry password or the registry username is not set, we leave early. 
[[ -z "${REGISTRY_PASSWORD}" ]] || [[ -z "${REGISTRY_USERNAME}" ]] && return @@ -173,7 +180,7 @@ function publish_sdk() { for a in all amd64 arm64; do sdk_name="flatcar-sdk-${a}" docker_image_from_registry_or_buildcache "${sdk_name}" "${docker_sdk_vernum}" - docker push "${sdk_container_common_registry}/flatcar-sdk-${a}":"${docker_sdk_vernum}" + docker push "${sdk_container_common_registry}/flatcar-sdk-${a}:${docker_sdk_vernum}" done } @@ -185,9 +192,11 @@ function _release_build_impl() { source sdk_container/.repo/manifests/version.txt local sdk_version="${FLATCAR_SDK_VERSION}" - local docker_sdk_vernum="$(vernum_to_docker_image_version "${sdk_version}")" + local docker_sdk_vernum="" + docker_sdk_vernum="$(vernum_to_docker_image_version "${sdk_version}")" local vernum="${FLATCAR_VERSION}" - local docker_vernum="$(vernum_to_docker_image_version "${vernum}")" + local docker_vernum="" + docker_vernum="$(vernum_to_docker_image_version "${vernum}")" local container_name="flatcar-publish-${docker_vernum}" local mantle_ref @@ -338,8 +347,11 @@ TEMPLATE=' } ' function generate_templates() { - CHANNEL="$1" - BOARD="$2" + local IFILE="$1" + local CHANNEL="$2" + local BOARD="$3" + local TMPFILE="" + local ARCHTAG="" local REGIONS=("eu-central-1" "ap-northeast-1" @@ -379,7 +391,7 @@ function generate_templates() { for region in "${REGIONS[@]}"; do echo " \"${region}\" : {" >> ${TMPFILE} echo -n ' "AMI" : ' >> ${TMPFILE} - cat "${CHANNEL}".json | jq ".[] | map(select(.name == \"${region}\")) | .[0] | .\"hvm\"" >> ${TMPFILE} + cat "${IFILE}" | jq ".[] | map(select(.name == \"${region}\")) | .[0] | .\"hvm\"" >> ${TMPFILE} echo " }," >> ${TMPFILE} done From 3fef1eb80153e4a33bfbc145e3436c13e74d8416 Mon Sep 17 00:00:00 2001 From: Kai Lueke Date: Thu, 22 Sep 2022 17:34:42 +0200 Subject: [PATCH 7/7] ci-automation/release: Set up secret envs --- ci-automation/release.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ci-automation/release.sh b/ci-automation/release.sh index c4453d8655..5d8fa263a9 100644 --- a/ci-automation/release.sh +++ b/ci-automation/release.sh @@ -191,6 +191,8 @@ function _release_build_impl() { init_submodules source sdk_container/.repo/manifests/version.txt + # Needed because we are not the SDK container here + source sdk_container/.env local sdk_version="${FLATCAR_SDK_VERSION}" local docker_sdk_vernum="" docker_sdk_vernum="$(vernum_to_docker_image_version "${sdk_version}")"