talos/hack/test/basic-integration.sh
Commit 8b019d8f33 by Spencer Smith: chore: update provider-components for capi v0.1.9
This PR updates our e2e tests with the provider-components file that's
generated by our capi v0.1.9 update.

Signed-off-by: Spencer Smith <robertspencersmith@gmail.com>
Date: 2019-09-06 22:45:44 -04:00

#!/bin/bash
set -euo pipefail
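## Versions, paths, and timeouts used throughout the test.
## TAG must be provided by the caller's environment; it is interpolated into TALOS_IMG below.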
export KUBERNETES_VERSION=v1.15.2
export TALOS_IMG="docker.io/autonomy/talos:${TAG}"
export TMP="/tmp/e2e"
export TALOSCONFIG="${TMP}/talosconfig"
export KUBECONFIG="${TMP}/kubeconfig"
export TIMEOUT=300
export OSCTL="${PWD}/build/osctl-linux-amd64"
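## OSCTL is the linux binary mounted into the hyperkube container below;
## LOCALOSCTL is the binary matching the host OS, run directly on this machine.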
case $(uname -s) in
  Linux*)
    export LOCALOSCTL="${PWD}/build/osctl-linux-amd64"
    ;;
  Darwin*)
    export LOCALOSCTL="${PWD}/build/osctl-darwin-amd64"
    ;;
  *)
    exit 1
    ;;
esac
## Create tmp dir
mkdir -p ${TMP}
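## run executes a command string inside a hyperkube container attached to the
## integration network, with osctl, the temp dir, and the dev manifests mounted in.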
run() {
  docker run \
    --rm \
    --interactive \
    --net=integration \
    --entrypoint=bash \
    --mount type=bind,source=${TMP},target=${TMP} \
    --mount type=bind,source=${PWD}/hack/dev/manifests,target=/manifests \
    -v ${OSCTL}:/bin/osctl:ro \
    -e KUBECONFIG=${KUBECONFIG} \
    -e TALOSCONFIG=${TALOSCONFIG} \
    k8s.gcr.io/hyperkube:${KUBERNETES_VERSION} -c "${1}"
}
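## Bring up a Docker-based Talos cluster named "integration" and point osctl at
## the node used for the remaining calls (10.5.0.2).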
${LOCALOSCTL} cluster create --name integration --image ${TALOS_IMG} --mtu 1440 --cpus 4.0
${LOCALOSCTL} config target 10.5.0.2
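## Dollar signs inside the run strings below are escaped so the timeout
## arithmetic is evaluated inside the container rather than by this script.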
## Fetch kubeconfig
run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until osctl kubeconfig > ${KUBECONFIG}; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
sleep 2
done"
## Wait for the init node to report in
run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until kubectl get node master-1 >/dev/null; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
kubectl get nodes -o wide
sleep 5
done"
## Deploy needed manifests
run "kubectl apply -f /manifests/psp.yaml -f /manifests/flannel.yaml -f /manifests/coredns.yaml"
## Wait for all 4 nodes (3 masters + 1 worker) to report in
run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
     until kubectl get nodes -o go-template='{{ len .items }}' | grep 4 >/dev/null; do
       [[ \$(date +%s) -gt \$timeout ]] && exit 1
       kubectl get nodes -o wide
       sleep 5
     done"
## Wait for all nodes ready
run "kubectl wait --timeout=${TIMEOUT}s --for=condition=ready=true --all nodes"
## Verify that we have an HA controlplane
run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until kubectl get nodes -l node-role.kubernetes.io/master='' -o go-template='{{ len .items }}' | grep 3 >/dev/null; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
kubectl get nodes -o wide -l node-role.kubernetes.io/master=''
sleep 5
done"