flatcar-eks: facilitate provisioning EKS workers

This change adds a new flatcar-eks package that ships with all the scripts
needed to join a Flatcar instance to an EKS cluster.

It includes the bootstrap.sh script used on Amazon Linux, to keep
compatibility with existing provisioning tools.

The package is pulled in by the oem-ec2-compat package when the board
is aws_pro, and it is part of board-packages, so that it gets built by
the os/board/packages job.
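
As an illustration (not part of this change): once the image ships these
files, a worker could be joined from instance userdata by invoking the
script that base.ign copies to /etc/eks/bootstrap.sh; the cluster name
and node label below are hypothetical:

  #!/bin/bash
  # Join this instance to the hypothetical cluster "my-cluster",
  # forwarding extra kubelet flags through --kubelet-extra-args.
  /etc/eks/bootstrap.sh --kubelet-extra-args '--node-labels=role=worker' my-cluster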
Margarita Manterola 2021-01-15 17:54:27 +01:00
parent 90a55e6aac
commit 9b18ee2ede
13 changed files with 1132 additions and 4 deletions

View File: coreos-base/flatcar-eks/files/bootstrap.patch

@@ -0,0 +1,98 @@
--- orig/bootstrap.sh 2021-01-21 15:07:34.749539965 +0100
+++ flatcar/bootstrap.sh 2021-01-22 12:21:58.080452841 +0100
@@ -202,6 +202,9 @@
exit 1
fi
+# Store the cluster name in a config file, to use it from other sources
+mkdir -p /etc/eks
+echo "CLUSTER_NAME=\"${CLUSTER_NAME}\"" > /etc/eks/cluster.conf
TOKEN=$(curl -X PUT -H "X-aws-ec2-metadata-token-ttl-seconds: 600" "http://169.254.169.254/latest/api/token")
AWS_DEFAULT_REGION=$(curl -s --retry 5 -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/dynamic/instance-identity/document | jq .region -r)
@@ -218,7 +221,8 @@
PAUSE_CONTAINER="$PAUSE_CONTAINER_IMAGE:$PAUSE_CONTAINER_VERSION"
### kubelet kubeconfig
-
+shopt -s expand_aliases
+alias aws="docker run --rm --network host amazon/aws-cli"
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
@@ -258,9 +262,9 @@
echo $B64_CLUSTER_CA | base64 -d > $CA_CERTIFICATE_FILE_PATH
-sed -i s,CLUSTER_NAME,$CLUSTER_NAME,g /var/lib/kubelet/kubeconfig
-sed -i s,MASTER_ENDPOINT,$APISERVER_ENDPOINT,g /var/lib/kubelet/kubeconfig
-sed -i s,AWS_REGION,$AWS_DEFAULT_REGION,g /var/lib/kubelet/kubeconfig
+sed -i s,CLUSTER_NAME,$CLUSTER_NAME,g /usr/share/oem/eks/kubelet-kubeconfig
+sed -i s,MASTER_ENDPOINT,$APISERVER_ENDPOINT,g /usr/share/oem/eks/kubelet-kubeconfig
+sed -i s,AWS_REGION,$AWS_DEFAULT_REGION,g /usr/share/oem/eks/kubelet-kubeconfig
### kubelet.service configuration
if [[ -z "${DNS_CLUSTER_IP}" ]]; then
@@ -279,7 +283,7 @@
DNS_CLUSTER_IP="${DNS_CLUSTER_IP}"
fi
-KUBELET_CONFIG=/etc/kubernetes/kubelet/kubelet-config.json
+KUBELET_CONFIG=/usr/share/oem/eks/kubelet-config.json
echo "$(jq ".clusterDNS=[\"$DNS_CLUSTER_IP\"]" $KUBELET_CONFIG)" > $KUBELET_CONFIG
INTERNAL_IP=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4)
@@ -291,7 +295,7 @@
# with this formula when scheduling pods: Allocatable = Capacity - Reserved - Eviction Threshold.
#calculate the max number of pods per instance type
-MAX_PODS_FILE="/etc/eks/eni-max-pods.txt"
+MAX_PODS_FILE="/usr/share/oem/eks/eni-max-pods.txt"
set +o pipefail
MAX_PODS=$(cat $MAX_PODS_FILE | awk "/^${INSTANCE_TYPE:-unset}/"' { print $2 }')
set -o pipefail
@@ -316,6 +320,8 @@
fi
fi
+cp /usr/share/oem/eks/kubelet.service /etc/systemd/system/
+
mkdir -p /etc/systemd/system/kubelet.service.d
cat <<EOF > /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
@@ -330,10 +336,15 @@
EOF
fi
+
# Replace with custom docker config contents.
if [[ -n "$DOCKER_CONFIG_JSON" ]]; then
echo "$DOCKER_CONFIG_JSON" > /etc/docker/daemon.json
systemctl restart docker
+else
+ # Copy the docker config shipped in /usr/share/oem
+ cp /usr/share/oem/eks/docker-daemon.json /etc/docker/daemon.json
+ systemctl restart docker
fi
if [[ "$ENABLE_DOCKER_BRIDGE" = "true" ]]; then
@@ -343,7 +354,19 @@
systemctl restart docker
fi
+# sysctl tweaking
+cat <<EOF | sudo tee -a /etc/sysctl.d/99-kubelet.conf
+# Needed for protectKernelDefaults=true
+vm.overcommit_memory=1
+kernel.panic=10
+kernel.panic_on_oops=1
+# Other kubelet tweaks
+fs.inotify.max_user_watches=524288
+vm.max_map_count=524288
+EOF
+
systemctl daemon-reload
+systemctl restart systemd-sysctl
systemctl enable kubelet
systemctl start kubelet

View File: coreos-base/flatcar-eks/files/bootstrap.sh

@@ -0,0 +1,376 @@
#!/usr/bin/env bash
set -o pipefail
set -o nounset
set -o errexit
err_report() {
echo "Exited with error on line $1"
}
trap 'err_report $LINENO' ERR
IFS=$'\n\t'
function print_help {
echo "usage: $0 [options] <cluster-name>"
echo "Bootstraps an instance into an EKS cluster"
echo ""
echo "-h,--help print this help"
echo "--use-max-pods Sets --max-pods for the kubelet when true. (default: true)"
echo "--b64-cluster-ca The base64 encoded cluster CA content. Only valid when used with --apiserver-endpoint. Bypasses calling \"aws eks describe-cluster\""
echo "--apiserver-endpoint The EKS cluster API Server endpoint. Only valid when used with --b64-cluster-ca. Bypasses calling \"aws eks describe-cluster\""
echo "--kubelet-extra-args Extra arguments to add to the kubelet. Useful for adding labels or taints."
echo "--enable-docker-bridge Restores the docker default bridge network. (default: false)"
echo "--aws-api-retry-attempts Number of retry attempts for AWS API call (DescribeCluster) (default: 3)"
echo "--docker-config-json The contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI"
echo "--dns-cluster-ip Overrides the IP address to use for DNS queries within the cluster. Defaults to 10.100.0.10 or 172.20.0.10 based on the IP address of the primary interface"
echo "--pause-container-account The AWS account (number) to pull the pause container from"
echo "--pause-container-version The tag of the pause container"
}
POSITIONAL=()
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
-h|--help)
print_help
exit 1
;;
--use-max-pods)
USE_MAX_PODS="$2"
shift
shift
;;
--b64-cluster-ca)
B64_CLUSTER_CA=$2
shift
shift
;;
--apiserver-endpoint)
APISERVER_ENDPOINT=$2
shift
shift
;;
--kubelet-extra-args)
KUBELET_EXTRA_ARGS=$2
shift
shift
;;
--enable-docker-bridge)
ENABLE_DOCKER_BRIDGE=$2
shift
shift
;;
--aws-api-retry-attempts)
API_RETRY_ATTEMPTS=$2
shift
shift
;;
--docker-config-json)
DOCKER_CONFIG_JSON=$2
shift
shift
;;
--pause-container-account)
PAUSE_CONTAINER_ACCOUNT=$2
shift
shift
;;
--pause-container-version)
PAUSE_CONTAINER_VERSION=$2
shift
shift
;;
--dns-cluster-ip)
DNS_CLUSTER_IP=$2
shift
shift
;;
*) # unknown option
POSITIONAL+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
set +u
set -- "${POSITIONAL[@]}" # restore positional parameters
CLUSTER_NAME="$1"
set -u
USE_MAX_PODS="${USE_MAX_PODS:-true}"
B64_CLUSTER_CA="${B64_CLUSTER_CA:-}"
APISERVER_ENDPOINT="${APISERVER_ENDPOINT:-}"
SERVICE_IPV4_CIDR="${SERVICE_IPV4_CIDR:-}"
DNS_CLUSTER_IP="${DNS_CLUSTER_IP:-}"
KUBELET_EXTRA_ARGS="${KUBELET_EXTRA_ARGS:-}"
ENABLE_DOCKER_BRIDGE="${ENABLE_DOCKER_BRIDGE:-false}"
API_RETRY_ATTEMPTS="${API_RETRY_ATTEMPTS:-3}"
DOCKER_CONFIG_JSON="${DOCKER_CONFIG_JSON:-}"
PAUSE_CONTAINER_VERSION="${PAUSE_CONTAINER_VERSION:-3.1-eksbuild.1}"
function get_pause_container_account_for_region () {
local region="$1"
case "${region}" in
ap-east-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-800184023465}";;
me-south-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-558608220178}";;
cn-north-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-918309763551}";;
cn-northwest-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-961992271922}";;
us-gov-west-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-013241004608}";;
us-gov-east-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-151742754352}";;
af-south-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-877085696533}";;
eu-south-1)
echo "${PAUSE_CONTAINER_ACCOUNT:-590381155156}";;
*)
echo "${PAUSE_CONTAINER_ACCOUNT:-602401143452}";;
esac
}
# Helper function which calculates the amount of the given resource (either CPU or memory)
# to reserve in a given resource range, specified by a start and end of the range and a percentage
# of the resource to reserve. Note that we return zero if the start of the resource range is
# greater than the total resource capacity on the node. Additionally, if the end range exceeds the total
# resource capacity of the node, we use the total resource capacity as the end of the range.
# Args:
# $1 total available resource on the worker node in input unit (either millicores for CPU or Mi for memory)
# $2 start of the resource range in input unit
# $3 end of the resource range in input unit
# $4 percentage of range to reserve in percent*100 (to allow for two decimal digits)
# Return:
# amount of resource to reserve in input unit
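# Illustrative example: with total=4000 (a 4-vCPU node in millicores),
# range 0..1000 and percentage=600 (6.00%), this reserves
# (1000 - 0) * 600 / 100 / 100 = 60 millicores.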
get_resource_to_reserve_in_range() {
local total_resource_on_instance=$1
local start_range=$2
local end_range=$3
local percentage=$4
resources_to_reserve="0"
if (( $total_resource_on_instance > $start_range )); then
resources_to_reserve=$(((($total_resource_on_instance < $end_range ? \
$total_resource_on_instance : $end_range) - $start_range) * $percentage / 100 / 100))
fi
echo $resources_to_reserve
}
# Calculates the amount of memory to reserve for kubeReserved in mebibytes. KubeReserved is a function of pod
# density so we are calculating the amount of memory to reserve for Kubernetes systems daemons by
# considering the maximum number of pods this instance type supports.
# Args:
# $1 the max number of pods per instance type (MAX_PODS) based on values from /etc/eks/eni-max-pods.txt
# Return:
# memory to reserve in Mi for the kubelet
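# Illustrative example: an instance type capped at 58 pods reserves
# 11 * 58 + 255 = 893 Mi.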
get_memory_mebibytes_to_reserve() {
local max_num_pods=$1
memory_to_reserve=$((11 * $max_num_pods + 255))
echo $memory_to_reserve
}
# Calculates the amount of CPU to reserve for kubeReserved in millicores from the total number of vCPUs available on the instance.
# From the total core capacity of this worker node, we calculate the CPU resources to reserve by reserving a percentage
# of the available cores in each range up to the total number of cores available on the instance.
# We are using these CPU ranges from GKE (https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture#node_allocatable):
# 6% of the first core
# 1% of the next core (up to 2 cores)
# 0.5% of the next 2 cores (up to 4 cores)
# 0.25% of any cores above 4 cores
# Return:
# CPU resources to reserve in millicores (m)
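# Illustrative example: a 4-vCPU (4000m) node reserves 60m (6% of the first
# core) + 10m (1% of the second) + 10m (0.5% of cores 3 and 4) = 80m.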
get_cpu_millicores_to_reserve() {
local total_cpu_on_instance=$(($(nproc) * 1000))
local cpu_ranges=(0 1000 2000 4000 $total_cpu_on_instance)
local cpu_percentage_reserved_for_ranges=(600 100 50 25)
cpu_to_reserve="0"
for i in ${!cpu_percentage_reserved_for_ranges[@]}; do
local start_range=${cpu_ranges[$i]}
local end_range=${cpu_ranges[(($i+1))]}
local percentage_to_reserve_for_range=${cpu_percentage_reserved_for_ranges[$i]}
cpu_to_reserve=$(($cpu_to_reserve + \
$(get_resource_to_reserve_in_range $total_cpu_on_instance $start_range $end_range $percentage_to_reserve_for_range)))
done
echo $cpu_to_reserve
}
if [ -z "$CLUSTER_NAME" ]; then
echo "CLUSTER_NAME is not defined"
exit 1
fi
TOKEN=$(curl -X PUT -H "X-aws-ec2-metadata-token-ttl-seconds: 600" "http://169.254.169.254/latest/api/token")
AWS_DEFAULT_REGION=$(curl -s --retry 5 -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/dynamic/instance-identity/document | jq .region -r)
AWS_SERVICES_DOMAIN=$(curl -s --retry 5 -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/2018-09-24/meta-data/services/domain)
MACHINE=$(uname -m)
if [[ "$MACHINE" != "x86_64" && "$MACHINE" != "aarch64" ]]; then
echo "Unknown machine architecture '$MACHINE'" >&2
exit 1
fi
PAUSE_CONTAINER_ACCOUNT=$(get_pause_container_account_for_region "${AWS_DEFAULT_REGION}")
PAUSE_CONTAINER_IMAGE=${PAUSE_CONTAINER_IMAGE:-$PAUSE_CONTAINER_ACCOUNT.dkr.ecr.$AWS_DEFAULT_REGION.$AWS_SERVICES_DOMAIN/eks/pause}
PAUSE_CONTAINER="$PAUSE_CONTAINER_IMAGE:$PAUSE_CONTAINER_VERSION"
### kubelet kubeconfig
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
if [[ -z "${B64_CLUSTER_CA}" ]] || [[ -z "${APISERVER_ENDPOINT}" ]]; then
DESCRIBE_CLUSTER_RESULT="/tmp/describe_cluster_result.txt"
# Retry the DescribeCluster API for API_RETRY_ATTEMPTS
for attempt in `seq 0 $API_RETRY_ATTEMPTS`; do
rc=0
if [[ $attempt -gt 0 ]]; then
echo "Attempt $attempt of $API_RETRY_ATTEMPTS"
fi
aws eks wait cluster-active \
--region=${AWS_DEFAULT_REGION} \
--name=${CLUSTER_NAME}
aws eks describe-cluster \
--region=${AWS_DEFAULT_REGION} \
--name=${CLUSTER_NAME} \
--output=text \
--query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint, kubernetesNetworkConfig: kubernetesNetworkConfig.serviceIpv4Cidr}' > $DESCRIBE_CLUSTER_RESULT || rc=$?
if [[ $rc -eq 0 ]]; then
break
fi
if [[ $attempt -eq $API_RETRY_ATTEMPTS ]]; then
exit $rc
fi
jitter=$((1 + RANDOM % 10))
sleep_sec="$(( $(( 5 << $((1+$attempt)) )) + $jitter))"
sleep $sleep_sec
done
B64_CLUSTER_CA=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $1}')
APISERVER_ENDPOINT=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $2}')
SERVICE_IPV4_CIDR=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $3}')
fi
echo $B64_CLUSTER_CA | base64 -d > $CA_CERTIFICATE_FILE_PATH
sed -i s,CLUSTER_NAME,$CLUSTER_NAME,g /var/lib/kubelet/kubeconfig
sed -i s,MASTER_ENDPOINT,$APISERVER_ENDPOINT,g /var/lib/kubelet/kubeconfig
sed -i s,AWS_REGION,$AWS_DEFAULT_REGION,g /var/lib/kubelet/kubeconfig
### kubelet.service configuration
if [[ -z "${DNS_CLUSTER_IP}" ]]; then
if [[ ! -z "${SERVICE_IPV4_CIDR}" ]] && [[ "${SERVICE_IPV4_CIDR}" != "None" ]] ; then
#Sets the DNS Cluster IP address that would be chosen from the serviceIpv4Cidr. (x.y.z.10)
DNS_CLUSTER_IP=${SERVICE_IPV4_CIDR%.*}.10
else
MAC=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ -s | head -n 1 | sed 's/\/$//')
TEN_RANGE=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/$MAC/vpc-ipv4-cidr-blocks | grep -c '^10\..*' || true )
DNS_CLUSTER_IP=10.100.0.10
if [[ "$TEN_RANGE" != "0" ]]; then
DNS_CLUSTER_IP=172.20.0.10
fi
fi
else
DNS_CLUSTER_IP="${DNS_CLUSTER_IP}"
fi
KUBELET_CONFIG=/etc/kubernetes/kubelet/kubelet-config.json
echo "$(jq ".clusterDNS=[\"$DNS_CLUSTER_IP\"]" $KUBELET_CONFIG)" > $KUBELET_CONFIG
INTERNAL_IP=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4)
INSTANCE_TYPE=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/instance-type)
# Sets kubeReserved and evictionHard in /etc/kubernetes/kubelet/kubelet-config.json for worker nodes. The following two function
# calls calculate the CPU and memory resources to reserve for kubeReserved based on the instance type of the worker node.
# Note that allocatable memory and CPU resources on worker nodes is calculated by the Kubernetes scheduler
# with this formula when scheduling pods: Allocatable = Capacity - Reserved - Eviction Threshold.
#calculate the max number of pods per instance type
MAX_PODS_FILE="/etc/eks/eni-max-pods.txt"
set +o pipefail
MAX_PODS=$(cat $MAX_PODS_FILE | awk "/^${INSTANCE_TYPE:-unset}/"' { print $2 }')
set -o pipefail
if [ -z "$MAX_PODS" ] || [ -z "$INSTANCE_TYPE" ]; then
echo "No entry for type '$INSTANCE_TYPE' in $MAX_PODS_FILE"
exit 1
fi
# calculates the amount of each resource to reserve
mebibytes_to_reserve=$(get_memory_mebibytes_to_reserve $MAX_PODS)
cpu_millicores_to_reserve=$(get_cpu_millicores_to_reserve)
# writes kubeReserved and evictionHard to the kubelet-config using the amount of CPU and memory to be reserved
echo "$(jq '. += {"evictionHard": {"memory.available": "100Mi", "nodefs.available": "10%", "nodefs.inodesFree": "5%"}}' $KUBELET_CONFIG)" > $KUBELET_CONFIG
echo "$(jq --arg mebibytes_to_reserve "${mebibytes_to_reserve}Mi" --arg cpu_millicores_to_reserve "${cpu_millicores_to_reserve}m" \
'. += {kubeReserved: {"cpu": $cpu_millicores_to_reserve, "ephemeral-storage": "1Gi", "memory": $mebibytes_to_reserve}}' $KUBELET_CONFIG)" > $KUBELET_CONFIG
if [[ "$USE_MAX_PODS" = "true" ]]; then
if [[ -n "$MAX_PODS" ]]; then
echo "$(jq ".maxPods=$MAX_PODS" $KUBELET_CONFIG)" > $KUBELET_CONFIG
else
echo "No entry for $INSTANCE_TYPE in $MAX_PODS_FILE. Not setting max pods for kubelet"
fi
fi
mkdir -p /etc/systemd/system/kubelet.service.d
cat <<EOF > /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
[Service]
Environment='KUBELET_ARGS=--node-ip=$INTERNAL_IP --pod-infra-container-image=$PAUSE_CONTAINER'
EOF
if [[ -n "$KUBELET_EXTRA_ARGS" ]]; then
cat <<EOF > /etc/systemd/system/kubelet.service.d/30-kubelet-extra-args.conf
[Service]
Environment='KUBELET_EXTRA_ARGS=$KUBELET_EXTRA_ARGS'
EOF
fi
# Replace with custom docker config contents.
if [[ -n "$DOCKER_CONFIG_JSON" ]]; then
echo "$DOCKER_CONFIG_JSON" > /etc/docker/daemon.json
systemctl restart docker
fi
if [[ "$ENABLE_DOCKER_BRIDGE" = "true" ]]; then
# Enabling the docker bridge network. We have to disable live-restore as it
# prevents docker from recreating the default bridge network on restart
echo "$(jq '.bridge="docker0" | ."live-restore"=false' /etc/docker/daemon.json)" > /etc/docker/daemon.json
systemctl restart docker
fi
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
# gpu boost clock
if command -v nvidia-smi &>/dev/null ; then
echo "nvidia-smi found"
sudo nvidia-smi -pm 1 # set persistence mode
sudo nvidia-smi --auto-boost-default=0
GPUNAME=$(nvidia-smi -L | head -n1)
echo $GPUNAME
# set application clock to maximum
if [[ $GPUNAME == *"A100"* ]]; then
nvidia-smi -ac 1215,1410
elif [[ $GPUNAME == *"V100"* ]]; then
nvidia-smi -ac 877,1530
elif [[ $GPUNAME == *"K80"* ]]; then
nvidia-smi -ac 2505,875
elif [[ $GPUNAME == *"T4"* ]]; then
nvidia-smi -ac 5001,1590
elif [[ $GPUNAME == *"M60"* ]]; then
nvidia-smi -ac 2505,1177
else
echo "unsupported gpu"
fi
else
echo "nvidia-smi not found"
fi

View File: coreos-base/flatcar-eks/files/docker-daemon.json

@@ -0,0 +1,17 @@
{
"bridge": "none",
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "10"
},
"live-restore": true,
"max-concurrent-downloads": 10,
"default-ulimits": {
"memlock": {
"Hard": -1,
"Name": "memlock",
"Soft": -1
}
}
}

View File: coreos-base/flatcar-eks/files/download-kubelet.sh

@@ -0,0 +1,52 @@
#!/bin/bash
set -euo pipefail
# Get the cluster name from /etc/eks/cluster.conf
. /etc/eks/cluster.conf
if [[ -z "${CLUSTER_NAME}" ]]; then
echo "No cluster name found. Aborting"
exit 1
fi
# Query the Kubernetes version of the cluster
mkdir -p /opt/eks
shopt -s expand_aliases
alias aws="docker run --rm --network host -v /opt/eks:/eks amazon/aws-cli"
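# Note: the host directory /opt/eks is mounted at /eks inside the CLI
# container, so the "aws s3 sync ... /eks/" call below writes to /opt/eks.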
CLUSTER_VERSION=$(aws eks describe-cluster --name "${CLUSTER_NAME}" --output text --query cluster.version)
if [[ -z "${CLUSTER_VERSION}" ]]; then
echo "No cluster version found. Aborting"
exit 1
fi
# Select the right path depending on the Kubernetes version.
case $CLUSTER_VERSION in
1.18)
S3_PATH="1.18.9/2020-11-02"
;;
1.17)
S3_PATH="1.17.12/2020-11-02"
;;
1.16)
S3_PATH="1.16.15/2020-11-02"
;;
1.15)
S3_PATH="1.15.12/2020-11-02"
;;
*)
echo "Unsupported Kubernetes version"
exit 1
;;
esac
# Sync the contents of the corresponding EKS bucket
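# For example, a 1.18 cluster syncs s3://amazon-eks/1.18.9/2020-11-02/bin/linux/amd64/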
aws s3 sync s3://amazon-eks/${S3_PATH}/bin/linux/amd64/ /eks/
# Install AWS CNI
mkdir -p /opt/cni/bin /etc/cni/net.d
tar -C /opt/cni/bin -zxvf /opt/eks/cni-amd64-v0.6.0.tgz
tar -C /opt/cni/bin -zxvf /opt/eks/cni-plugins-linux-amd64-v0.8.6.tgz
# Make binaries executable
chmod +x /opt/eks/kubelet
chmod +x /opt/eks/aws-iam-authenticator

View File: coreos-base/flatcar-eks/files/eni-max-pods.txt

@@ -0,0 +1,403 @@
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This file was generated at 2020-12-12T18:33:04-03:00
#
# Mapping is calculated from AWS EC2 API using the following formula:
# * First IP on each ENI is not used for pods
# * +2 for the pods that use host-networking (AWS CNI and kube-proxy)
#
# # of ENI * (# of IPv4 per ENI - 1) + 2
#
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI
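#
# Example: m5.large supports 3 ENIs with 10 IPv4 addresses each, so
# 3 * (10 - 1) + 2 = 29, matching its entry below.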
#
a1.2xlarge 58
a1.4xlarge 234
a1.large 29
a1.medium 8
a1.metal 234
a1.xlarge 58
c1.medium 12
c1.xlarge 58
c3.2xlarge 58
c3.4xlarge 234
c3.8xlarge 234
c3.large 29
c3.xlarge 58
c4.2xlarge 58
c4.4xlarge 234
c4.8xlarge 234
c4.large 29
c4.xlarge 58
c5.12xlarge 234
c5.18xlarge 737
c5.24xlarge 737
c5.2xlarge 58
c5.4xlarge 234
c5.9xlarge 234
c5.large 29
c5.metal 737
c5.xlarge 58
c5a.12xlarge 234
c5a.16xlarge 737
c5a.24xlarge 737
c5a.2xlarge 58
c5a.4xlarge 234
c5a.8xlarge 234
c5a.large 29
c5a.metal 737
c5a.xlarge 58
c5ad.12xlarge 234
c5ad.16xlarge 737
c5ad.24xlarge 737
c5ad.2xlarge 58
c5ad.4xlarge 234
c5ad.8xlarge 234
c5ad.large 29
c5ad.metal 737
c5ad.xlarge 58
c5d.12xlarge 234
c5d.18xlarge 737
c5d.24xlarge 737
c5d.2xlarge 58
c5d.4xlarge 234
c5d.9xlarge 234
c5d.large 29
c5d.metal 737
c5d.xlarge 58
c5n.18xlarge 737
c5n.2xlarge 58
c5n.4xlarge 234
c5n.9xlarge 234
c5n.large 29
c5n.metal 737
c5n.xlarge 58
c6g.12xlarge 234
c6g.16xlarge 737
c6g.2xlarge 58
c6g.4xlarge 234
c6g.8xlarge 234
c6g.large 29
c6g.medium 8
c6g.metal 737
c6g.xlarge 58
c6gd.12xlarge 234
c6gd.16xlarge 737
c6gd.2xlarge 58
c6gd.4xlarge 234
c6gd.8xlarge 234
c6gd.large 29
c6gd.medium 8
c6gd.metal 737
c6gd.xlarge 58
cc2.8xlarge 234
cr1.8xlarge 234
d2.2xlarge 58
d2.4xlarge 234
d2.8xlarge 234
d2.xlarge 58
d3.2xlarge 18
d3.4xlarge 38
d3.8xlarge 59
d3.xlarge 10
d3en.12xlarge 89
d3en.2xlarge 18
d3en.4xlarge 38
d3en.6xlarge 58
d3en.8xlarge 78
d3en.xlarge 10
f1.16xlarge 394
f1.2xlarge 58
f1.4xlarge 234
g2.2xlarge 58
g2.8xlarge 234
g3.16xlarge 737
g3.4xlarge 234
g3.8xlarge 234
g3s.xlarge 58
g4ad.16xlarge 234
g4ad.4xlarge 29
g4ad.8xlarge 58
g4dn.12xlarge 234
g4dn.16xlarge 58
g4dn.2xlarge 29
g4dn.4xlarge 29
g4dn.8xlarge 58
g4dn.metal 737
g4dn.xlarge 29
h1.16xlarge 737
h1.2xlarge 58
h1.4xlarge 234
h1.8xlarge 234
hs1.8xlarge 234
i2.2xlarge 58
i2.4xlarge 234
i2.8xlarge 234
i2.xlarge 58
i3.16xlarge 737
i3.2xlarge 58
i3.4xlarge 234
i3.8xlarge 234
i3.large 29
i3.metal 737
i3.xlarge 58
i3en.12xlarge 234
i3en.24xlarge 737
i3en.2xlarge 58
i3en.3xlarge 58
i3en.6xlarge 234
i3en.large 29
i3en.metal 737
i3en.xlarge 58
inf1.24xlarge 321
inf1.2xlarge 38
inf1.6xlarge 234
inf1.xlarge 38
m1.large 29
m1.medium 12
m1.small 8
m1.xlarge 58
m2.2xlarge 118
m2.4xlarge 234
m2.xlarge 58
m3.2xlarge 118
m3.large 29
m3.medium 12
m3.xlarge 58
m4.10xlarge 234
m4.16xlarge 234
m4.2xlarge 58
m4.4xlarge 234
m4.large 20
m4.xlarge 58
m5.12xlarge 234
m5.16xlarge 737
m5.24xlarge 737
m5.2xlarge 58
m5.4xlarge 234
m5.8xlarge 234
m5.large 29
m5.metal 737
m5.xlarge 58
m5a.12xlarge 234
m5a.16xlarge 737
m5a.24xlarge 737
m5a.2xlarge 58
m5a.4xlarge 234
m5a.8xlarge 234
m5a.large 29
m5a.xlarge 58
m5ad.12xlarge 234
m5ad.16xlarge 737
m5ad.24xlarge 737
m5ad.2xlarge 58
m5ad.4xlarge 234
m5ad.8xlarge 234
m5ad.large 29
m5ad.xlarge 58
m5d.12xlarge 234
m5d.16xlarge 737
m5d.24xlarge 737
m5d.2xlarge 58
m5d.4xlarge 234
m5d.8xlarge 234
m5d.large 29
m5d.metal 737
m5d.xlarge 58
m5dn.12xlarge 234
m5dn.16xlarge 737
m5dn.24xlarge 737
m5dn.2xlarge 58
m5dn.4xlarge 234
m5dn.8xlarge 234
m5dn.large 29
m5dn.xlarge 58
m5n.12xlarge 234
m5n.16xlarge 737
m5n.24xlarge 737
m5n.2xlarge 58
m5n.4xlarge 234
m5n.8xlarge 234
m5n.large 29
m5n.xlarge 58
m5zn.12xlarge 737
m5zn.2xlarge 58
m5zn.3xlarge 234
m5zn.6xlarge 234
m5zn.large 29
m5zn.metal 737
m5zn.xlarge 58
m6g.12xlarge 234
m6g.16xlarge 737
m6g.2xlarge 58
m6g.4xlarge 234
m6g.8xlarge 234
m6g.large 29
m6g.medium 8
m6g.metal 737
m6g.xlarge 58
m6gd.12xlarge 234
m6gd.16xlarge 737
m6gd.2xlarge 58
m6gd.4xlarge 234
m6gd.8xlarge 234
m6gd.large 29
m6gd.medium 8
m6gd.metal 737
m6gd.xlarge 58
mac1.metal 234
p2.16xlarge 234
p2.8xlarge 234
p2.xlarge 58
p3.16xlarge 234
p3.2xlarge 58
p3.8xlarge 234
p3dn.24xlarge 737
p4d.24xlarge 737
r3.2xlarge 58
r3.4xlarge 234
r3.8xlarge 234
r3.large 29
r3.xlarge 58
r4.16xlarge 737
r4.2xlarge 58
r4.4xlarge 234
r4.8xlarge 234
r4.large 29
r4.xlarge 58
r5.12xlarge 234
r5.16xlarge 737
r5.24xlarge 737
r5.2xlarge 58
r5.4xlarge 234
r5.8xlarge 234
r5.large 29
r5.metal 737
r5.xlarge 58
r5a.12xlarge 234
r5a.16xlarge 737
r5a.24xlarge 737
r5a.2xlarge 58
r5a.4xlarge 234
r5a.8xlarge 234
r5a.large 29
r5a.xlarge 58
r5ad.12xlarge 234
r5ad.16xlarge 737
r5ad.24xlarge 737
r5ad.2xlarge 58
r5ad.4xlarge 234
r5ad.8xlarge 234
r5ad.large 29
r5ad.xlarge 58
r5b.12xlarge 234
r5b.16xlarge 737
r5b.24xlarge 737
r5b.2xlarge 58
r5b.4xlarge 234
r5b.8xlarge 234
r5b.large 29
r5b.metal 737
r5b.xlarge 58
r5d.12xlarge 234
r5d.16xlarge 737
r5d.24xlarge 737
r5d.2xlarge 58
r5d.4xlarge 234
r5d.8xlarge 234
r5d.large 29
r5d.metal 737
r5d.xlarge 58
r5dn.12xlarge 234
r5dn.16xlarge 737
r5dn.24xlarge 737
r5dn.2xlarge 58
r5dn.4xlarge 234
r5dn.8xlarge 234
r5dn.large 29
r5dn.xlarge 58
r5n.12xlarge 234
r5n.16xlarge 737
r5n.24xlarge 737
r5n.2xlarge 58
r5n.4xlarge 234
r5n.8xlarge 234
r5n.large 29
r5n.xlarge 58
r6g.12xlarge 234
r6g.16xlarge 737
r6g.2xlarge 58
r6g.4xlarge 234
r6g.8xlarge 234
r6g.large 29
r6g.medium 8
r6g.metal 737
r6g.xlarge 58
r6gd.12xlarge 234
r6gd.16xlarge 737
r6gd.2xlarge 58
r6gd.4xlarge 234
r6gd.8xlarge 234
r6gd.large 29
r6gd.medium 8
r6gd.metal 737
r6gd.xlarge 58
t1.micro 4
t2.2xlarge 44
t2.large 35
t2.medium 17
t2.micro 4
t2.nano 4
t2.small 11
t2.xlarge 44
t3.2xlarge 58
t3.large 35
t3.medium 17
t3.micro 4
t3.nano 4
t3.small 11
t3.xlarge 58
t3a.2xlarge 58
t3a.large 35
t3a.medium 17
t3a.micro 4
t3a.nano 4
t3a.small 8
t3a.xlarge 58
t4g.2xlarge 58
t4g.large 35
t4g.medium 17
t4g.micro 4
t4g.nano 4
t4g.small 11
t4g.xlarge 58
u-12tb1.metal 147
u-18tb1.metal 737
u-24tb1.metal 737
u-6tb1.metal 147
u-9tb1.metal 147
x1.16xlarge 234
x1.32xlarge 234
x1e.16xlarge 234
x1e.2xlarge 58
x1e.32xlarge 234
x1e.4xlarge 58
x1e.8xlarge 58
x1e.xlarge 29
z1d.12xlarge 737
z1d.2xlarge 58
z1d.3xlarge 234
z1d.6xlarge 234
z1d.large 29
z1d.metal 737
z1d.xlarge 58

View File: coreos-base/flatcar-eks/files/kubelet-config.json

@@ -0,0 +1,36 @@
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"address": "0.0.0.0",
"authentication": {
"anonymous": {
"enabled": false
},
"webhook": {
"cacheTTL": "2m0s",
"enabled": true
},
"x509": {
"clientCAFile": "/etc/kubernetes/pki/ca.crt"
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"clusterDomain": "cluster.local",
"hairpinMode": "hairpin-veth",
"readOnlyPort": 0,
"cgroupDriver": "cgroupfs",
"cgroupRoot": "/",
"featureGates": {
"RotateKubeletServerCertificate": true
},
"protectKernelDefaults": true,
"serializeImagePulls": false,
"serverTLSBootstrap": true,
"tlsCipherSuites": ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"]
}

View File: coreos-base/flatcar-eks/files/kubelet-kubeconfig

@@ -0,0 +1,25 @@
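# CLUSTER_NAME, MASTER_ENDPOINT and AWS_REGION are placeholders that
# bootstrap.sh replaces with sed when the node joins the cluster.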
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /etc/kubernetes/pki/ca.crt
server: MASTER_ENDPOINT
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubelet
name: kubelet
current-context: kubelet
users:
- name: kubelet
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: /opt/eks/aws-iam-authenticator
args:
- "token"
- "-i"
- "CLUSTER_NAME"
- --region
- "AWS_REGION"

View File: coreos-base/flatcar-eks/files/kubelet.service

@@ -0,0 +1,29 @@
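# ${COREOS_EC2_HOSTNAME} is populated by coreos-metadata.service via the
# /run/metadata/flatcar environment file referenced below.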
[Unit]
Description=EKS Kubelet
Documentation=https://github.com/kubernetes/kubernetes
Wants=docker.service coreos-metadata.service
Requires=docker.service coreos-metadata.service
After=docker.service iptables-restore.service
[Service]
EnvironmentFile=/run/metadata/flatcar
ExecStartPre=/usr/share/oem/eks/download-kubelet.sh
ExecStartPre=/sbin/iptables -P FORWARD ACCEPT -w 5
ExecStart=/opt/eks/kubelet \
--cloud-provider aws \
--config /usr/share/oem/eks/kubelet-config.json \
--kubeconfig /usr/share/oem/eks/kubelet-kubeconfig \
--container-runtime docker \
--network-plugin cni \
--cni-bin-dir=/opt/cni/bin \
--cni-conf-dir=/etc/cni/net.d \
--hostname-override ${COREOS_EC2_HOSTNAME} \
$KUBELET_ARGS \
$KUBELET_EXTRA_ARGS
Restart=always
RestartSec=5
KillMode=process
[Install]
WantedBy=multi-user.target

View File: coreos-base/flatcar-eks ebuild

@@ -0,0 +1,57 @@
# Copyright (c) 2021 Kinvolk GmbH
# Distributed under the terms of the Apache License 2.0
# This package is heavily based on the files distributed in
# https://github.com/awslabs/amazon-eks-ami, the files have been adapted to fit
# Flatcar Container Linux instead of Amazon Linux.
EAPI=6
inherit eutils
DESCRIPTION="Configuration for EKS worker nodes"
HOMEPAGE=""
SRC_URI=""
LICENSE="Apache-2.0"
SLOT="0"
KEYWORDS="amd64 arm64"
IUSE=""
# no source directory
S="${WORKDIR}"
src_prepare() {
# The bootstrap.sh file has been downloaded from:
# https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/bootstrap.sh
# We keep our patches separate to facilitate synchronizing changes
cp "${FILESDIR}/bootstrap.sh" "${WORKDIR}/"
eapply -p1 "${FILESDIR}/bootstrap.patch"
eapply_user
}
src_install() {
insinto /usr/share/oem/eks
doins "${WORKDIR}/bootstrap.sh"
# These files are based on the ones found in the amazon-eks-ami repository,
# but adapted to fit Flatcar's needs. Since they are a lot simpler, we don't
# use the patching technique, but rather just edit them as needed.
doins "${FILESDIR}/kubelet-kubeconfig"
doins "${FILESDIR}/kubelet.service"
# These files are taken verbatim from the amazon-eks-ami repository:
# https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/kubelet-config.json
# https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/docker-daemon.json
# https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/eni-max-pods.txt
doins "${FILESDIR}/kubelet-config.json"
doins "${FILESDIR}/docker-daemon.json"
doins "${FILESDIR}/eni-max-pods.txt"
# This download script has been created specifically for Flatcar. It gets
# the current EKS Cluster Kubernetes version and downloads all the
# necessary files to run the kubelet on the node.
doins "${FILESDIR}/download-kubelet.sh"
chmod +x "${D}/usr/share/oem/eks/bootstrap.sh" "${D}/usr/share/oem/eks/download-kubelet.sh"
}

View File: coreos-base/flatcar-eks/metadata.xml

@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
</pkgmetadata>

View File: coreos-base/oem-ec2-compat/files/base/base-aws-pro.ign

@@ -0,0 +1,25 @@
{
"ignition": {
"version": "2.1.0"
},
"storage": {
"files": [
{
"filesystem": "root",
"path": "/etc/eks/bootstrap.sh",
"contents": {
"source": "oem:///eks/bootstrap.sh"
},
"mode": 493
}
]
},
"systemd": {
"units": [
{
"name": "coreos-metadata-sshkeys@.service",
"enabled": true
}
]
}
}

View File: coreos-base/oem-ec2-compat ebuild

@@ -10,8 +10,8 @@ SRC_URI=""
LICENSE="Apache-2.0"
SLOT="0"
KEYWORDS="amd64 arm64 x86"
IUSE="ec2 openstack brightbox"
REQUIRED_USE="^^ ( ec2 openstack brightbox )"
IUSE="ec2 openstack brightbox aws_pro"
REQUIRED_USE="^^ ( ec2 openstack brightbox aws_pro )"
# TODO: The AWS SSM Agent is currently too big for the OEM partition
# but if it fits, uncomment the following and revert
@@ -19,12 +19,15 @@ REQUIRED_USE="^^ ( ec2 openstack brightbox )"
#RDEPEND="
# ec2? ( app-emulation/amazon-ssm-agent )
#"
RDEPEND="
aws_pro? ( coreos-base/flatcar-eks )
"
# no source directory
S="${WORKDIR}"
src_prepare() {
-if use ec2 ; then
+if use ec2 || use aws_pro ; then
ID="ami"
NAME="Amazon EC2"
HOME_URL="http://aws.amazon.com/ec2/"
@@ -50,7 +53,7 @@ src_prepare() {
src_install() {
insinto "/usr/share/oem"
doins "${T}/oem-release"
-if use ec2 ; then
+if use ec2 || use aws_pro ; then
newins "${FILESDIR}/grub-ec2.cfg" grub.cfg
elif use openstack ; then
newins "${FILESDIR}/grub-openstack.cfg" grub.cfg
@@ -63,5 +66,7 @@ src_install() {
doins "${FILESDIR}/base/default.ign"
if use ec2 ; then
newins "${FILESDIR}/base/base-ec2.ign" base.ign
+elif use aws_pro ; then
+newins "${FILESDIR}/base/base-aws-pro.ign" base.ign
fi
}

View File: coreos-base/board-packages ebuild

@@ -23,6 +23,7 @@ RDEPEND="
app-emulation/open-vm-tools
app-emulation/wa-linux-agent
coreos-base/coreos-oem-gce
+coreos-base/flatcar-eks
coreos-base/nova-agent-container
coreos-base/nova-agent-watcher
dev-lang/python-oem