chore: refactor E2E scripts

This PR aims to simplify our E2E scripts.
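The change collapses the per-platform basic-integration-*, capi, push-image-*, and e2e-integration targets into a single e2e-% pattern rule that builds the integration-test, sonobuoy, and kubectl prerequisites and then dispatches to ./hack/test/e2e-<platform>.sh, with the shared logic (kubeconfig handling, CAPI cluster creation, Sonobuoy runs, CIS benchmark helpers) moved into hack/test/e2e.sh. A rough sketch of the resulting local workflow, using only target names taken from the Makefile and scripts in this diff (exact prerequisites and credentials may differ outside CI):

    # Each target runs the matching ./hack/test/e2e-<platform>.sh with TAG, SHA,
    # ARTIFACTS, OSCTL, INTEGRATION_TEST, KUBECTL, and SONOBUOY pre-populated.
    make e2e-docker        # local Docker-based cluster, Talos + Kubernetes integration tests
    make e2e-firecracker   # same flow on the Firecracker provisioner
    make e2e-capi          # installs Cluster API components onto the Docker cluster
    make e2e-aws           # cloud run; assumes the CAPI management cluster from e2e-capi
    make e2e-gcp           # cloud run; assumes the CAPI management cluster from e2e-capi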

Signed-off-by: Andrew Rynhard <andrew@andrewrynhard.com>
Author: Andrew Rynhard
Date: 2020-01-23 20:52:02 -08:00
Commit: 88667641df (parent c359caef3d)
44 changed files with 650 additions and 819 deletions


@ -2,7 +2,7 @@ coverage:
status:
project:
default:
target: 29%
target: 28%
threshold: 0.5%
base: auto
patch: off


@ -23,6 +23,8 @@ steps:
- echo -e "$BUILDX_KUBECONFIG" > /root/.kube/config
- docker buildx create --driver kubernetes --driver-opt replicas=2 --driver-opt namespace=ci --driver-opt image=moby/buildkit:v0.6.2 --name ci --buildkitd-flags="--allow-insecure-entitlement security.insecure" --use
- docker buildx inspect --bootstrap
- make ./_out/sonobuoy
- make ./_out/kubectl
environment:
BUILDX_KUBECONFIG:
from_secret: kubeconfig
@ -482,11 +484,12 @@ steps:
- lint-go
- name: coverage
image: plugins/codecov
settings:
files:
- coverage.txt
token:
image: alpine:3.10
commands:
- apk --no-cache add bash curl git
- bash -c "bash <(curl -s https://codecov.io/bash) -f _out/coverage.txt -X fix"
environment:
CODECOV_TOKEN:
from_secret: codecov_token
when:
event:
@ -494,14 +497,11 @@ steps:
depends_on:
- unit-tests
- name: basic-integration-docker
- name: e2e-docker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-docker
environment:
DOCKER_NET: basic-integration
TALOS_PLATFORM: docker
- make e2e-docker
volumes:
- name: dockersock
path: /var/run
@ -518,14 +518,11 @@ steps:
- talos
- osctl-linux
- name: basic-integration-firecracker
- name: e2e-firecracker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-firecracker
environment:
DOCKER_NET: host
TALOS_PLATFORM: firecracker
- make e2e-firecracker
privileged: true
volumes:
- name: dockersock
@ -539,8 +536,9 @@ steps:
- name: tmp
path: /tmp
depends_on:
- unit-tests
- osctl-linux
- kernel
- basic-integration-docker
- name: push
pull: always
@ -570,8 +568,8 @@ steps:
- promote
- cron
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: push-latest
pull: always
@ -600,8 +598,8 @@ steps:
event:
- push
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
services:
- name: docker
@ -671,6 +669,8 @@ steps:
- echo -e "$BUILDX_KUBECONFIG" > /root/.kube/config
- docker buildx create --driver kubernetes --driver-opt replicas=2 --driver-opt namespace=ci --driver-opt image=moby/buildkit:v0.6.2 --name ci --buildkitd-flags="--allow-insecure-entitlement security.insecure" --use
- docker buildx inspect --bootstrap
- make ./_out/sonobuoy
- make ./_out/kubectl
environment:
BUILDX_KUBECONFIG:
from_secret: kubeconfig
@ -1130,11 +1130,12 @@ steps:
- lint-go
- name: coverage
image: plugins/codecov
settings:
files:
- coverage.txt
token:
image: alpine:3.10
commands:
- apk --no-cache add bash curl git
- bash -c "bash <(curl -s https://codecov.io/bash) -f _out/coverage.txt -X fix"
environment:
CODECOV_TOKEN:
from_secret: codecov_token
when:
event:
@ -1142,14 +1143,11 @@ steps:
depends_on:
- unit-tests
- name: basic-integration-docker
- name: e2e-docker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-docker
environment:
DOCKER_NET: basic-integration
TALOS_PLATFORM: docker
- make e2e-docker
volumes:
- name: dockersock
path: /var/run
@ -1166,14 +1164,11 @@ steps:
- talos
- osctl-linux
- name: basic-integration-firecracker
- name: e2e-firecracker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-firecracker
environment:
DOCKER_NET: host
TALOS_PLATFORM: firecracker
- make e2e-firecracker
privileged: true
volumes:
- name: dockersock
@ -1187,8 +1182,9 @@ steps:
- name: tmp
path: /tmp
depends_on:
- unit-tests
- osctl-linux
- kernel
- basic-integration-docker
- name: push
pull: always
@ -1218,8 +1214,8 @@ steps:
- promote
- cron
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: push-latest
pull: always
@ -1248,45 +1244,19 @@ steps:
event:
- push
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: capi
- name: e2e-capi
pull: always
image: autonomy/build-container:latest
commands:
- make capi
environment:
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
from_secret: azure_svc_acct
DOCKER_NET: basic-integration
GCE_SVC_ACCT:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
volumes:
- name: dockersock
path: /var/run
- name: docker
path: /root/.docker/buildx
- name: kube
path: /root/.kube
- name: dev
path: /dev
- name: tmp
path: /tmp
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- name: push-image-aws
pull: always
image: autonomy/build-container:latest
commands:
- make push-image-aws
- make e2e-capi
environment:
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
@ -1307,14 +1277,19 @@ steps:
- name: tmp
path: /tmp
depends_on:
- image-aws
- e2e-docker
- e2e-firecracker
- name: push-image-gcp
- name: e2e-aws
pull: always
image: autonomy/build-container:latest
commands:
- make push-image-gcp
- make e2e-aws
environment:
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
@ -1335,16 +1310,26 @@ steps:
- name: tmp
path: /tmp
depends_on:
- image-gcp
- e2e-capi
- name: e2e-integration-aws
- name: e2e-gcp
pull: always
image: autonomy/build-container:latest
commands:
- make e2e-integration
- make e2e-gcp
environment:
DOCKER_NET: basic-integration
TALOS_PLATFORM: aws
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
from_secret: azure_svc_acct
GCE_SVC_ACCT:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
volumes:
- name: dockersock
path: /var/run
@ -1357,31 +1342,7 @@ steps:
- name: tmp
path: /tmp
depends_on:
- capi
- push-image-aws
- name: e2e-integration-gcp
pull: always
image: autonomy/build-container:latest
commands:
- make e2e-integration
environment:
DOCKER_NET: basic-integration
TALOS_PLATFORM: gcp
volumes:
- name: dockersock
path: /var/run
- name: docker
path: /root/.docker/buildx
- name: kube
path: /root/.kube
- name: dev
path: /dev
- name: tmp
path: /tmp
depends_on:
- capi
- push-image-gcp
- e2e-capi
services:
- name: docker
@ -1446,6 +1407,8 @@ steps:
- echo -e "$BUILDX_KUBECONFIG" > /root/.kube/config
- docker buildx create --driver kubernetes --driver-opt replicas=2 --driver-opt namespace=ci --driver-opt image=moby/buildkit:v0.6.2 --name ci --buildkitd-flags="--allow-insecure-entitlement security.insecure" --use
- docker buildx inspect --bootstrap
- make ./_out/sonobuoy
- make ./_out/kubectl
environment:
BUILDX_KUBECONFIG:
from_secret: kubeconfig
@ -1905,11 +1868,12 @@ steps:
- lint-go
- name: coverage
image: plugins/codecov
settings:
files:
- coverage.txt
token:
image: alpine:3.10
commands:
- apk --no-cache add bash curl git
- bash -c "bash <(curl -s https://codecov.io/bash) -f _out/coverage.txt -X fix"
environment:
CODECOV_TOKEN:
from_secret: codecov_token
when:
event:
@ -1917,14 +1881,11 @@ steps:
depends_on:
- unit-tests
- name: basic-integration-docker
- name: e2e-docker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-docker
environment:
DOCKER_NET: basic-integration
TALOS_PLATFORM: docker
- make e2e-docker
volumes:
- name: dockersock
path: /var/run
@ -1941,14 +1902,11 @@ steps:
- talos
- osctl-linux
- name: basic-integration-firecracker
- name: e2e-firecracker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-firecracker
environment:
DOCKER_NET: host
TALOS_PLATFORM: firecracker
- make e2e-firecracker
privileged: true
volumes:
- name: dockersock
@ -1962,8 +1920,9 @@ steps:
- name: tmp
path: /tmp
depends_on:
- unit-tests
- osctl-linux
- kernel
- basic-integration-docker
- name: push
pull: always
@ -1993,8 +1952,8 @@ steps:
- promote
- cron
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: push-latest
pull: always
@ -2023,45 +1982,19 @@ steps:
event:
- push
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: capi
- name: e2e-capi
pull: always
image: autonomy/build-container:latest
commands:
- make capi
environment:
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
from_secret: azure_svc_acct
DOCKER_NET: basic-integration
GCE_SVC_ACCT:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
volumes:
- name: dockersock
path: /var/run
- name: docker
path: /root/.docker/buildx
- name: kube
path: /root/.kube
- name: dev
path: /dev
- name: tmp
path: /tmp
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- name: push-image-aws
pull: always
image: autonomy/build-container:latest
commands:
- make push-image-aws
- make e2e-capi
environment:
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
@ -2082,14 +2015,19 @@ steps:
- name: tmp
path: /tmp
depends_on:
- image-aws
- e2e-docker
- e2e-firecracker
- name: push-image-gcp
- name: e2e-aws
pull: always
image: autonomy/build-container:latest
commands:
- make push-image-gcp
- make e2e-aws
environment:
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
@ -2098,29 +2036,7 @@ steps:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
volumes:
- name: dockersock
path: /var/run
- name: docker
path: /root/.docker/buildx
- name: kube
path: /root/.kube
- name: dev
path: /dev
- name: tmp
path: /tmp
depends_on:
- image-gcp
- name: conformance-aws
pull: always
image: autonomy/build-container:latest
commands:
- make e2e-integration
environment:
DOCKER_NET: basic-integration
SONOBUOY_MODE: certified-conformance
TALOS_PLATFORM: aws
volumes:
- name: dockersock
path: /var/run
@ -2133,18 +2049,27 @@ steps:
- name: tmp
path: /tmp
depends_on:
- capi
- push-image-aws
- e2e-capi
- name: conformance-gcp
- name: e2e-gcp
pull: always
image: autonomy/build-container:latest
commands:
- make e2e-integration
- make e2e-gcp
environment:
DOCKER_NET: basic-integration
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
from_secret: azure_svc_acct
GCE_SVC_ACCT:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
SONOBUOY_MODE: certified-conformance
TALOS_PLATFORM: gcp
volumes:
- name: dockersock
path: /var/run
@ -2157,8 +2082,7 @@ steps:
- name: tmp
path: /tmp
depends_on:
- capi
- push-image-gcp
- e2e-capi
- name: push-edge
pull: always
@ -2185,8 +2109,8 @@ steps:
cron:
- nightly
depends_on:
- conformance-aws
- conformance-gcp
- e2e-aws
- e2e-gcp
services:
- name: docker
@ -2251,6 +2175,8 @@ steps:
- echo -e "$BUILDX_KUBECONFIG" > /root/.kube/config
- docker buildx create --driver kubernetes --driver-opt replicas=2 --driver-opt namespace=ci --driver-opt image=moby/buildkit:v0.6.2 --name ci --buildkitd-flags="--allow-insecure-entitlement security.insecure" --use
- docker buildx inspect --bootstrap
- make ./_out/sonobuoy
- make ./_out/kubectl
environment:
BUILDX_KUBECONFIG:
from_secret: kubeconfig
@ -2710,11 +2636,12 @@ steps:
- lint-go
- name: coverage
image: plugins/codecov
settings:
files:
- coverage.txt
token:
image: alpine:3.10
commands:
- apk --no-cache add bash curl git
- bash -c "bash <(curl -s https://codecov.io/bash) -f _out/coverage.txt -X fix"
environment:
CODECOV_TOKEN:
from_secret: codecov_token
when:
event:
@ -2722,14 +2649,11 @@ steps:
depends_on:
- unit-tests
- name: basic-integration-docker
- name: e2e-docker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-docker
environment:
DOCKER_NET: basic-integration
TALOS_PLATFORM: docker
- make e2e-docker
volumes:
- name: dockersock
path: /var/run
@ -2746,14 +2670,11 @@ steps:
- talos
- osctl-linux
- name: basic-integration-firecracker
- name: e2e-firecracker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-firecracker
environment:
DOCKER_NET: host
TALOS_PLATFORM: firecracker
- make e2e-firecracker
privileged: true
volumes:
- name: dockersock
@ -2767,8 +2688,9 @@ steps:
- name: tmp
path: /tmp
depends_on:
- unit-tests
- osctl-linux
- kernel
- basic-integration-docker
- name: push
pull: always
@ -2798,8 +2720,8 @@ steps:
- promote
- cron
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: push-latest
pull: always
@ -2828,45 +2750,19 @@ steps:
event:
- push
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: capi
- name: e2e-capi
pull: always
image: autonomy/build-container:latest
commands:
- make capi
environment:
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
from_secret: azure_svc_acct
DOCKER_NET: basic-integration
GCE_SVC_ACCT:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
volumes:
- name: dockersock
path: /var/run
- name: docker
path: /root/.docker/buildx
- name: kube
path: /root/.kube
- name: dev
path: /dev
- name: tmp
path: /tmp
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- name: push-image-aws
pull: always
image: autonomy/build-container:latest
commands:
- make push-image-aws
- make e2e-capi
environment:
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
@ -2887,14 +2783,19 @@ steps:
- name: tmp
path: /tmp
depends_on:
- image-aws
- e2e-docker
- e2e-firecracker
- name: push-image-gcp
- name: e2e-aws
pull: always
image: autonomy/build-container:latest
commands:
- make push-image-gcp
- make e2e-aws
environment:
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
@ -2903,29 +2804,7 @@ steps:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
volumes:
- name: dockersock
path: /var/run
- name: docker
path: /root/.docker/buildx
- name: kube
path: /root/.kube
- name: dev
path: /dev
- name: tmp
path: /tmp
depends_on:
- image-gcp
- name: conformance-aws
pull: always
image: autonomy/build-container:latest
commands:
- make e2e-integration
environment:
DOCKER_NET: basic-integration
SONOBUOY_MODE: certified-conformance
TALOS_PLATFORM: aws
volumes:
- name: dockersock
path: /var/run
@ -2938,18 +2817,27 @@ steps:
- name: tmp
path: /tmp
depends_on:
- capi
- push-image-aws
- e2e-capi
- name: conformance-gcp
- name: e2e-gcp
pull: always
image: autonomy/build-container:latest
commands:
- make e2e-integration
- make e2e-gcp
environment:
DOCKER_NET: basic-integration
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
AWS_SVC_ACCT:
from_secret: aws_svc_acct
AZURE_SVC_ACCT:
from_secret: azure_svc_acct
GCE_SVC_ACCT:
from_secret: gce_svc_acct
PACKET_AUTH_TOKEN:
from_secret: packet_auth_token
SONOBUOY_MODE: certified-conformance
TALOS_PLATFORM: gcp
volumes:
- name: dockersock
path: /var/run
@ -2962,8 +2850,7 @@ steps:
- name: tmp
path: /tmp
depends_on:
- capi
- push-image-gcp
- e2e-capi
- name: push-edge
pull: always
@ -2990,8 +2877,8 @@ steps:
cron:
- nightly
depends_on:
- conformance-aws
- conformance-gcp
- e2e-aws
- e2e-gcp
services:
- name: docker
@ -3056,6 +2943,8 @@ steps:
- echo -e "$BUILDX_KUBECONFIG" > /root/.kube/config
- docker buildx create --driver kubernetes --driver-opt replicas=2 --driver-opt namespace=ci --driver-opt image=moby/buildkit:v0.6.2 --name ci --buildkitd-flags="--allow-insecure-entitlement security.insecure" --use
- docker buildx inspect --bootstrap
- make ./_out/sonobuoy
- make ./_out/kubectl
environment:
BUILDX_KUBECONFIG:
from_secret: kubeconfig
@ -3515,11 +3404,12 @@ steps:
- lint-go
- name: coverage
image: plugins/codecov
settings:
files:
- coverage.txt
token:
image: alpine:3.10
commands:
- apk --no-cache add bash curl git
- bash -c "bash <(curl -s https://codecov.io/bash) -f _out/coverage.txt -X fix"
environment:
CODECOV_TOKEN:
from_secret: codecov_token
when:
event:
@ -3527,14 +3417,11 @@ steps:
depends_on:
- unit-tests
- name: basic-integration-docker
- name: e2e-docker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-docker
environment:
DOCKER_NET: basic-integration
TALOS_PLATFORM: docker
- make e2e-docker
volumes:
- name: dockersock
path: /var/run
@ -3551,14 +3438,11 @@ steps:
- talos
- osctl-linux
- name: basic-integration-firecracker
- name: e2e-firecracker
pull: always
image: autonomy/build-container:latest
commands:
- make basic-integration-firecracker
environment:
DOCKER_NET: host
TALOS_PLATFORM: firecracker
- make e2e-firecracker
privileged: true
volumes:
- name: dockersock
@ -3572,8 +3456,9 @@ steps:
- name: tmp
path: /tmp
depends_on:
- unit-tests
- osctl-linux
- kernel
- basic-integration-docker
- name: push
pull: always
@ -3603,8 +3488,8 @@ steps:
- promote
- cron
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: push-latest
pull: always
@ -3633,8 +3518,8 @@ steps:
event:
- push
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: iso
pull: always
@ -3653,8 +3538,8 @@ steps:
- name: tmp
path: /tmp
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: boot
pull: always
@ -3673,8 +3558,8 @@ steps:
- name: tmp
path: /tmp
depends_on:
- basic-integration-docker
- basic-integration-firecracker
- e2e-docker
- e2e-firecracker
- name: release
image: plugins/github-release

.gitignore (vendored, 5 changed lines)

@ -7,8 +7,8 @@ join.yaml
docgen
talosconfig
kubeconfig
hack/test/integration/matchbox/assets/*
!hack/test/integration/matchbox/assets/.gitkeep
hack/test/libvirt/matchbox/assets/*
!hack/test/libvirt/matchbox/assets/.gitkeep
# vim Swap
[._]*.s[a-v][a-z]
@ -18,7 +18,6 @@ hack/test/integration/matchbox/assets/*
[._]sw[a-p]
# Go
coverage.txt
.artifacts/
sha256sum.txt


@ -12,6 +12,9 @@ GO_VERSION ?= 1.13
OPERATING_SYSTEM := $(shell uname -s | tr "[:upper:]" "[:lower:]")
OSCTL_DEFAULT_TARGET := osctl-$(OPERATING_SYSTEM)
INTEGRATION_TEST_DEFAULT_TARGET := integration-test-$(OPERATING_SYSTEM)
KUBECTL_URL ?= https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/$(OPERATING_SYSTEM)/amd64/kubectl
SONOBUOY_VERSION ?= 0.17.1
SONOBUOY_URL ?= https://github.com/heptio/sonobuoy/releases/download/v$(SONOBUOY_VERSION)/sonobuoy_$(SONOBUOY_VERSION)_$(OPERATING_SYSTEM)_amd64.tar.gz
TESTPKGS ?= ./...
BUILD := docker buildx build
@ -89,7 +92,7 @@ docker-%: ## Builds the specified target defined in the Dockerfile using the doc
@$(MAKE) target-$* TARGET_ARGS="--output type=docker,dest=$(DEST)/$*.tar,name=$(REGISTRY_AND_USERNAME)/$*:$(TAG) $(TARGET_ARGS)"
hack-test-%: ## Runs the specified script in ./hack/test with well-known environment variables.
@TAG=$(TAG) SHA=$(SHA) ARTIFACTS=$(ARTIFACTS) ./hack/test/$*.sh
@./hack/test/$*.sh
# Generators
@ -180,30 +183,36 @@ lint: ## Runs linters on go, protobuf, and markdown file types.
.PHONY: unit-tests
unit-tests: apps ## Performs unit tests.
@$(MAKE) local-$@ DEST=./ TARGET_ARGS="--allow security.insecure"
@$(MAKE) local-$@ DEST=$(ARTIFACTS) TARGET_ARGS="--allow security.insecure"
.PHONY: unit-tests-race
unit-tests-race: ## Performs unit tests with race detection enabled.
@$(MAKE) local-$@ DEST=./
@$(MAKE) target-$@
integration-test-%:
@$(MAKE) local-$@ DEST=$(ARTIFACTS)
$(ARTIFACTS)/$(INTEGRATION_TEST_DEFAULT_TARGET)-amd64:
@$(MAKE) local-$(INTEGRATION_TEST_DEFAULT_TARGET) DEST=$(ARTIFACTS)
integration-test: $(INTEGRATION_TEST_DEFAULT_TARGET) ## Builds the integration-test binary for the local machine.
$(ARTIFACTS)/sonobuoy:
@mkdir -p $(ARTIFACTS)
@curl -L -o /tmp/sonobuoy.tar.gz ${SONOBUOY_URL}
@tar -xf /tmp/sonobuoy.tar.gz -C $(ARTIFACTS)
basic-integration-%: integration-test osctl talos ## Runs the basic integration test.
@$(MAKE) hack-test-basic-integration PROVISIONER=$*
$(ARTIFACTS)/kubectl:
@mkdir -p $(ARTIFACTS)
@curl -L -o $(ARTIFACTS)/kubectl "$(KUBECTL_URL)"
@chmod +x $(ARTIFACTS)/kubectl
.PHONY: e2e-integration
e2e-integration: ## Runs the E2E integration for the specified cloud provider.
@$(MAKE) hack-test-$@
push-image-%: ## Pushes a VM image into the specified cloud provider. Valid options are aws, azure, and gcp (e.g. push-image-aws).
@$(MAKE) hack-test-$*-setup
.PHONY: capi
capi: ## Deploys Cluster API to the basic integration cluster.
@$(MAKE) hack-test-$@
e2e-%: $(ARTIFACTS)/$(INTEGRATION_TEST_DEFAULT_TARGET)-amd64 $(ARTIFACTS)/sonobuoy $(ARTIFACTS)/kubectl ## Runs the E2E test for the specified platform (e.g. e2e-docker).
@$(MAKE) hack-test-$@ \
PLATFORM=$* \
TAG=$(TAG) \
SHA=$(SHA) \
IMAGE=$(REGISTRY_AND_USERNAME)/talos:$(TAG) \
ARTIFACTS=$(ARTIFACTS) \
OSCTL=$(PWD)/$(ARTIFACTS)/$(OSCTL_DEFAULT_TARGET)-amd64 \
INTEGRATION_TEST=$(PWD)/$(ARTIFACTS)/$(INTEGRATION_TEST_DEFAULT_TARGET)-amd64 \
KUBECTL=$(PWD)/$(ARTIFACTS)/kubectl \
SONOBUOY=$(PWD)/$(ARTIFACTS)/sonobuoy
# Utilities
@ -223,4 +232,4 @@ push-%: login ## Pushes the installer, and talos images to the configured contai
.PHONY: clean
clean: ## Cleans up all artifacts.
@-rm -rf $(ARTIFACTS) coverage.txt
@-rm -rf $(ARTIFACTS)


@ -122,7 +122,9 @@ local setup_ci = {
'apk add coreutils',
'echo -e "$BUILDX_KUBECONFIG" > /root/.kube/config',
'docker buildx create --driver kubernetes --driver-opt replicas=2 --driver-opt namespace=ci --driver-opt image=moby/buildkit:v0.6.2 --name ci --buildkitd-flags="--allow-insecure-entitlement security.insecure" --use',
'docker buildx inspect --bootstrap'
'docker buildx inspect --bootstrap',
'make ./_out/sonobuoy',
'make ./_out/kubectl',
],
volumes: volumes.ForStep(),
};
@ -192,16 +194,19 @@ local image_gcp = Step("image-gcp", depends_on=[installer]);
local image_vmware = Step("image-vmware", depends_on=[installer]);
local unit_tests = Step("unit-tests", depends_on=[talos]);
local unit_tests_race = Step("unit-tests-race", depends_on=[golint]);
local basic_integration_docker = Step("basic-integration-docker", depends_on=[unit_tests, talos, osctl_linux], environment={TALOS_PLATFORM: "docker", DOCKER_NET: "basic-integration"});
local basic_integration_firecracker = Step("basic-integration-firecracker", privileged=true, depends_on=[kernel, basic_integration_docker], environment={TALOS_PLATFORM: "firecracker", DOCKER_NET: "host"});
local e2e_docker = Step("e2e-docker", depends_on=[unit_tests, talos, osctl_linux]);
local e2e_firecracker = Step("e2e-firecracker", privileged=true, depends_on=[unit_tests, osctl_linux, kernel]);
local coverage = {
name: 'coverage',
image: 'plugins/codecov',
settings: {
token: { from_secret: 'codecov_token' },
files: ['coverage.txt'],
image: 'alpine:3.10',
environment: {
CODECOV_TOKEN: { from_secret: 'codecov_token' },
},
commands: [
'apk --no-cache add bash curl git',
'bash -c "bash <(curl -s https://codecov.io/bash) -f _out/coverage.txt -X fix"'
],
when: {
event: ['pull_request'],
},
@ -227,7 +232,7 @@ local push = {
],
},
},
depends_on: [basic_integration_docker.name, basic_integration_firecracker.name],
depends_on: [e2e_docker.name, e2e_firecracker.name],
};
local push_latest = {
@ -248,7 +253,7 @@ local push_latest = {
'push',
],
},
depends_on: [basic_integration_docker.name, basic_integration_firecracker.name],
depends_on: [e2e_docker.name, e2e_firecracker.name],
};
local default_steps = [
@ -277,8 +282,8 @@ local default_steps = [
unit_tests,
unit_tests_race,
coverage,
basic_integration_docker,
basic_integration_firecracker,
e2e_docker,
e2e_firecracker,
push,
push_latest,
];
@ -302,27 +307,24 @@ local default_pipeline = Pipeline('default', default_steps) + default_trigger;
// E2E pipeline.
local creds_env_vars = {
AZURE_SVC_ACCT: {from_secret: "azure_svc_acct"},
// TODO(andrewrynhard): Rename this to the GCP convention.
GCE_SVC_ACCT: {from_secret: "gce_svc_acct"},
PACKET_AUTH_TOKEN: {from_secret: "packet_auth_token"},
AWS_SVC_ACCT: {from_secret: "aws_svc_acct"},
AWS_ACCESS_KEY_ID: { from_secret: 'aws_access_key_id' },
AWS_SECRET_ACCESS_KEY: { from_secret: 'aws_secret_access_key' },
AWS_SVC_ACCT: {from_secret: "aws_svc_acct"},
AZURE_SVC_ACCT: {from_secret: "azure_svc_acct"},
// TODO(andrewrynhard): Rename this to the GCP convention.
GCE_SVC_ACCT: {from_secret: "gce_svc_acct"},
PACKET_AUTH_TOKEN: {from_secret: "packet_auth_token"},
};
local capi = Step("capi", depends_on=[basic_integration_docker, basic_integration_firecracker], environment=creds_env_vars+{DOCKER_NET: "basic-integration"});
local push_image_aws = Step("push-image-aws", depends_on=[image_aws], environment=creds_env_vars);
local push_image_azure = Step("push-image-azure", depends_on=[image_azure], environment=creds_env_vars);
local push_image_gcp = Step("push-image-gcp", depends_on=[image_gcp], environment=creds_env_vars);
local e2e_integration_aws = Step("e2e-integration-aws", target="e2e-integration", depends_on=[capi, push_image_aws], environment={TALOS_PLATFORM: "aws", DOCKER_NET: "basic-integration"});
local e2e_integration_azure = Step("e2e-integration-azure", target="e2e-integration", depends_on=[capi, push_image_azure], environment={TALOS_PLATFORM: "azure", DOCKER_NET: "basic-integration"});
local e2e_integration_gcp = Step("e2e-integration-gcp", target="e2e-integration", depends_on=[capi, push_image_gcp], environment={TALOS_PLATFORM: "gcp", DOCKER_NET: "basic-integration"});
local e2e_capi = Step("e2e-capi", depends_on=[e2e_docker, e2e_firecracker], environment=creds_env_vars);
local e2e_aws = Step("e2e-aws", depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_azure = Step("e2e-azure", depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_gcp = Step("e2e-gcp", depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_steps = default_steps + [
capi,
push_image_aws,
push_image_gcp,
e2e_integration_aws,
e2e_integration_gcp,
e2e_capi,
e2e_aws,
e2e_gcp,
];
local e2e_trigger = {
@ -337,9 +339,9 @@ local e2e_pipeline = Pipeline('e2e', e2e_steps) + e2e_trigger;
// Conformance pipeline.
local conformance_aws = Step("conformance-aws", target="e2e-integration", depends_on=[capi, push_image_aws], environment={SONOBUOY_MODE: "certified-conformance", TALOS_PLATFORM: "aws", DOCKER_NET: "basic-integration"});
local conformance_azure = Step("conformance-azure", target="e2e-integration", depends_on=[capi, push_image_azure], environment={SONOBUOY_MODE: "certified-conformance", TALOS_PLATFORM: "azure", DOCKER_NET: "basic-integration"});
local conformance_gcp = Step("conformance-gcp", target="e2e-integration", depends_on=[capi, push_image_gcp], environment={SONOBUOY_MODE: "certified-conformance", TALOS_PLATFORM: "gcp", DOCKER_NET: "basic-integration"});
local conformance_aws = Step("e2e-aws", depends_on=[e2e_capi], environment=creds_env_vars+{SONOBUOY_MODE: "certified-conformance"});
local conformance_azure = Step("e2e-azure", depends_on=[e2e_capi], environment=creds_env_vars+{SONOBUOY_MODE: "certified-conformance"});
local conformance_gcp = Step("e2e-gcp", depends_on=[e2e_capi], environment=creds_env_vars+{SONOBUOY_MODE: "certified-conformance"});
local push_edge = {
name: 'push-edge',
@ -360,9 +362,7 @@ local push_edge = {
};
local conformance_steps = default_steps + [
capi,
push_image_aws,
push_image_gcp,
e2e_capi,
conformance_aws,
conformance_gcp,
push_edge,
@ -392,21 +392,8 @@ local nightly_pipeline = Pipeline('nightly', conformance_steps) + nightly_trigge
// Release pipeline.
local aws_env_vars = {
AWS_ACCESS_KEY_ID: { from_secret: 'aws_access_key_id' },
AWS_SECRET_ACCESS_KEY: { from_secret: 'aws_secret_access_key' },
AWS_DEFAULT_REGION: 'us-west-2',
AWS_PUBLISH_REGIONS: 'us-west-2,us-east-1,us-east-2,us-west-1,eu-central-1',
};
local ami_trigger = {
when: {
event: ['tag'],
},
};
local iso = Step('iso', depends_on=[basic_integration_docker, basic_integration_firecracker]);
local boot = Step('boot', depends_on=[basic_integration_docker, basic_integration_firecracker]);
local iso = Step('iso', depends_on=[e2e_docker, e2e_firecracker]);
local boot = Step('boot', depends_on=[e2e_docker, e2e_firecracker]);
// TODO(andrewrynhard): We should run E2E tests on a release.
local release = {


@ -1,61 +0,0 @@
#!/bin/bash
set -eou pipefail
REGION="us-east-1"
BUCKET="talos-ci-e2e"
TMP=/tmp/e2e/aws
## Setup svc account
mkdir -p ${TMP}
echo ${AWS_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.ini
# Ensure AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env vars are available
export AWS_ACCESS_KEY_ID=$(awk '/aws_access_key_id/ { print $NF }' ${TMP}/svc-acct.ini)
export AWS_SECRET_ACCESS_KEY=$(awk '/aws_secret_access_key/ { print $NF }' ${TMP}/svc-acct.ini)
## Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/aws.tar.gz
# Upload Image
echo "uploading image to s3"
aws s3 cp --quiet ${TMP}/disk.raw s3://${BUCKET}/aws-${TAG}.raw
# Create snapshot from image
echo "importing snapshot from s3"
import_task_id=$(aws ec2 import-snapshot --region ${REGION} --description "talos e2e ${TAG}" --disk-container "Format=raw,UserBucket={S3Bucket=${BUCKET},S3Key=aws-${TAG}.raw}" | jq -r '.ImportTaskId')
echo ${import_task_id}
# Wait for import to complete
echo "waiting for snapshot import to complete"
snapshot_status=$(aws ec2 describe-import-snapshot-tasks --region ${REGION} --import-task-ids ${import_task_id} | \
jq -r --arg image_name "aws-${TAG}.raw" '.ImportSnapshotTasks[] | select(.SnapshotTaskDetail.UserBucket.S3Key == $image_name) | .SnapshotTaskDetail.Status')
while [ ${snapshot_status} != "completed" ]; do
sleep 5
snapshot_status=$(aws ec2 describe-import-snapshot-tasks --region ${REGION} --import-task-ids ${import_task_id} | \
jq -r --arg image_name "aws-${TAG}.raw" '.ImportSnapshotTasks[] | select(.SnapshotTaskDetail.UserBucket.S3Key == $image_name) | .SnapshotTaskDetail.Status')
done
snapshot_id=$(aws ec2 describe-import-snapshot-tasks --region ${REGION} --import-task-ids ${import_task_id} | \
jq -r --arg image_name "aws-${TAG}.raw" '.ImportSnapshotTasks[] | select(.SnapshotTaskDetail.UserBucket.S3Key == $image_name) | .SnapshotTaskDetail.SnapshotId')
echo ${snapshot_id}
# Create AMI
image_id=$(aws ec2 describe-images --region ${REGION} --filters="Name=name,Values=talos-e2e-${TAG}" | jq -r '.Images[0].ImageId') || true
if [[ ${image_id} != "null" ]]; then
aws ec2 deregister-image --region ${REGION} --image-id ${image_id}
fi
ami=$(aws ec2 register-image --region ${REGION} \
--block-device-mappings "DeviceName=/dev/xvda,VirtualName=talostest,Ebs={DeleteOnTermination=true,SnapshotId=${snapshot_id},VolumeSize=20,VolumeType=gp2}" \
--root-device-name /dev/xvda \
--virtualization-type hvm \
--architecture x86_64 \
--ena-support \
--name talos-e2e-${TAG} | \
jq -r '.ImageId')
## Setup the cluster YAML.
sed -e "s#{{REGION}}#${REGION}#g" \
-e "s/{{TAG}}/${SHA}/" \
-e "s#{{AMI}}#${ami}#g" ${PWD}/hack/test/manifests/aws-cluster.yaml > ${TMP}/cluster.yaml


@ -1,36 +0,0 @@
#!/bin/bash
set -eou pipefail
STORAGE_ACCOUNT=talostesting
STORAGE_CONTAINER=talostesting
GROUP=talos
TMP=/tmp/e2e/azure
## Setup svc acct vars
mkdir -p ${TMP}
echo ${AZURE_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.json
CLIENT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.clientId' )"
CLIENT_SECRET="$( cat ${TMP}/svc-acct.json | jq -r '.clientSecret' )"
TENANT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.tenantId' )"
## Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/azure.tar.gz
## Login to azure
az login --service-principal --username ${CLIENT_ID} --password ${CLIENT_SECRET} --tenant ${TENANT_ID} > /dev/null
## Get connection string
AZURE_STORAGE_CONNECTION_STRING=$(az storage account show-connection-string -n ${STORAGE_ACCOUNT} -g ${GROUP} -o tsv)
## Push blob
AZURE_STORAGE_CONNECTION_STRING="${AZURE_STORAGE_CONNECTION_STRING}" az storage blob upload --container-name ${STORAGE_CONTAINER} -f ${TMP}/disk.vhd -n azure-${TAG}.vhd
## Delete image
az image delete --name talos-e2e-${TAG} -g ${GROUP}
## Create image
az image create --name talos-e2e-${TAG} --source https://${STORAGE_ACCOUNT}.blob.core.windows.net/${STORAGE_CONTAINER}/azure-${TAG}.vhd --os-type linux -g ${GROUP}
## Setup the cluster YAML.
sed "s/{{TAG}}/${TAG}/" ${PWD}/hack/test/manifests/azure-cluster.yaml > ${TMP}/cluster.yaml


@ -1,75 +0,0 @@
#!/bin/bash
set -eou pipefail
TMP="/tmp/e2e"
TALOS_IMG="docker.io/autonomy/talos:${TAG}"
export TALOSCONFIG="${TMP}/talosconfig"
case "${CI:-false}" in
true)
ENDPOINT="docker"
;;
*)
ENDPOINT="127.0.0.1"
;;
esac
case $(uname -s) in
Linux*)
OSCTL="${PWD}/${ARTIFACTS}/osctl-linux-amd64"
INTEGRATION_TEST="${PWD}/${ARTIFACTS}/integration-test-linux-amd64"
;;
Darwin*)
OSCTL="${PWD}/${ARTIFACTS}/osctl-darwin-amd64"
INTEGRATION_TEST="${PWD}/${ARTIFACTS}/integration-test-darwin-amd64"
;;
*)
exit 1
;;
esac
mkdir -p "${TMP}"
case ${PROVISIONER} in
docker)
"${OSCTL}" cluster create \
--provisioner docker \
--image "${TALOS_IMG}" \
--name basic-integration \
--masters=3 \
--mtu 1500 \
--memory 2048 \
--cpus 4.0 \
--wait \
--endpoint "${ENDPOINT}"
"${INTEGRATION_TEST}" -test.v -talos.osctlpath "${OSCTL}" -talos.k8sendpoint "${ENDPOINT}:6443"
mkdir -p ${TMP}/${TALOS_PLATFORM}
"${OSCTL}" kubeconfig ${TMP}/${TALOS_PLATFORM}
./hack/test/conformance.sh
;;
firecracker)
"${OSCTL}" cluster create \
--provisioner firecracker \
--name basic-integration \
--masters=3 \
--mtu 1500 \
--memory 2048 \
--cpus 2.0 \
--cidr 172.20.0.0/24 \
--init-node-as-endpoint \
--wait \
--install-image docker.io/autonomy/installer:latest
"${INTEGRATION_TEST}" -test.v -talos.osctlpath "${OSCTL}"
;;
*)
echo "unknown provisioner: ${PROVISIONER}"
exit 1
;;
esac


@ -1,39 +0,0 @@
#!/bin/bash
set -eou pipefail
export TALOS_PLATFORM="docker"
source ./hack/test/e2e-runner.sh
## Create tmp dir
mkdir -p ${TMP}
cp ${PWD}/hack/test/manifests/provider-components.yaml ${TMP}/provider-components.yaml
## Installs envsubst command
apk add --no-cache gettext
## Template out aws components
## Using a local copy until v0.5.0 of the provider is cut.
export AWS_B64ENCODED_CREDENTIALS=${AWS_SVC_ACCT}
cat ${PWD}/hack/test/manifests/capa-components.yaml| envsubst > ${TMP}/capa-components.yaml
## Template out gcp components
export GCP_B64ENCODED_CREDENTIALS=${GCE_SVC_ACCT}
cat ${PWD}/hack/test/manifests/capg-components.yaml| envsubst > ${TMP}/capg-components.yaml
##Until next alpha release, keep a local copy of capg-components.yaml.
##They've got an incorrect image pull policy.
##curl -L ${CAPG_COMPONENTS} | envsubst > ${TMP}/capg-components.yaml
## Drop in capi stuff
e2e_run "kubectl apply -f ${TMP}/provider-components.yaml"
e2e_run "kubectl apply -f ${CAPI_COMPONENTS}"
e2e_run "kubectl apply -f ${TMP}/capa-components.yaml"
e2e_run "kubectl apply -f ${TMP}/capg-components.yaml"
## Wait for talosconfig in cm then dump it out
e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until KUBECONFIG=${KUBECONFIG} kubectl wait --timeout=1s --for=condition=Ready -n ${CABPT_NS} pods --all; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
echo 'Waiting to CABPT pod to be available...'
sleep 10
done"


@ -9,7 +9,7 @@ spec:
clusterNetwork:
pods:
cidrBlocks:
- 192.168.0.0/16
- 192.168.0.0/16
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AWSCluster
@ -22,13 +22,12 @@ metadata:
name: talos-e2e-{{TAG}}-aws
namespace: default
spec:
region: {{REGION}}
sshKeyName: debug
region: '{{REGION}}'
sshKeyName: talos-e2e
networkSpec:
vpc:
id: "vpc-ff5c5687"
id: 'vpc-ff5c5687'
---
## Controlplane 0 configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -44,7 +43,7 @@ kind: Machine
metadata:
labels:
cluster.x-k8s.io/cluster-name: talos-e2e-{{TAG}}-aws
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/control-plane: 'true'
name: talos-e2e-{{TAG}}-aws-controlplane-0
namespace: default
spec:
@ -67,17 +66,17 @@ metadata:
name: talos-e2e-{{TAG}}-aws-controlplane-0
namespace: default
spec:
instanceType: t3.small
sshKeyName: debug
instanceType: m5.xlarge
rootDeviceSize: 150
sshKeyName: talos-e2e
ami:
id: {{AMI}}
id: '{{AMI}}'
subnet:
id: "subnet-c4e9b3a0"
id: 'subnet-c4e9b3a0'
additionalSecurityGroups:
- id: "sg-ebe8e59f"
- id: 'sg-ebe8e59f'
publicIP: true
---
## Controlplane 1 configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -93,7 +92,7 @@ kind: Machine
metadata:
labels:
cluster.x-k8s.io/cluster-name: talos-e2e-{{TAG}}-aws
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/control-plane: 'true'
name: talos-e2e-{{TAG}}-aws-controlplane-1
namespace: default
spec:
@ -116,17 +115,17 @@ metadata:
name: talos-e2e-{{TAG}}-aws-controlplane-1
namespace: default
spec:
instanceType: t3.small
sshKeyName: debug
instanceType: m5.xlarge
rootDeviceSize: 150
sshKeyName: talos-e2e
ami:
id: {{AMI}}
id: '{{AMI}}'
subnet:
id: "subnet-c4e9b3a0"
id: 'subnet-c4e9b3a0'
additionalSecurityGroups:
- id: "sg-ebe8e59f"
- id: 'sg-ebe8e59f'
publicIP: true
---
## Controlplane 2 configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -142,7 +141,7 @@ kind: Machine
metadata:
labels:
cluster.x-k8s.io/cluster-name: talos-e2e-{{TAG}}-aws
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/control-plane: 'true'
name: talos-e2e-{{TAG}}-aws-controlplane-2
namespace: default
spec:
@ -165,17 +164,17 @@ metadata:
name: talos-e2e-{{TAG}}-aws-controlplane-2
namespace: default
spec:
instanceType: t3.small
sshKeyName: debug
instanceType: m5.xlarge
rootDeviceSize: 150
sshKeyName: talos-e2e
ami:
id: {{AMI}}
id: '{{AMI}}'
subnet:
id: "subnet-c4e9b3a0"
id: 'subnet-c4e9b3a0'
additionalSecurityGroups:
- id: "sg-ebe8e59f"
- id: 'sg-ebe8e59f'
publicIP: true
---
## Worker deployment configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -186,7 +185,7 @@ metadata:
spec:
template:
spec:
machineType: "join"
machineType: 'join'
---
apiVersion: cluster.x-k8s.io/v1alpha2
kind: MachineDeployment
@ -229,12 +228,13 @@ metadata:
spec:
template:
spec:
instanceType: t3.small
sshKeyName: debug
instanceType: t3.large
rootDeviceSize: 100
sshKeyName: talos-e2e
ami:
id: {{AMI}}
id: '{{AMI}}'
subnet:
id: "subnet-c4e9b3a0"
id: 'subnet-c4e9b3a0'
additionalSecurityGroups:
- id: "sg-ebe8e59f"
- id: 'sg-ebe8e59f'
publicIP: true


@ -23,7 +23,7 @@ spec:
type: azure
controlplane:
count: 3
k8sversion: "1.16.2"
k8sversion: '1.16.2'
---
apiVersion: cluster.k8s.io/v1alpha1
kind: Machine


@ -9,7 +9,7 @@ spec:
clusterNetwork:
pods:
cidrBlocks:
- 192.168.0.0/16
- 192.168.0.0/16
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: GCPCluster
@ -25,7 +25,6 @@ spec:
project: talos-testbed
region: us-central1
---
## Controlplane 0 configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -42,7 +41,7 @@ kind: Machine
metadata:
labels:
cluster.x-k8s.io/cluster-name: talos-e2e-{{TAG}}-gcp
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/control-plane: 'true'
name: talos-e2e-{{TAG}}-gcp-controlplane-0
namespace: default
spec:
@ -65,14 +64,13 @@ metadata:
name: talos-e2e-{{TAG}}-gcp-controlplane-0
namespace: default
spec:
instanceType: n1-standard-2
instanceType: n1-standard-4
zone: us-central1-a
image: projects/talos-testbed/global/images/talos-e2e-{{TAG}}
serviceAccounts: {}
publicIP: true
rootDeviceSize: 100
rootDeviceSize: 150
---
## Controlplane 1 configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -89,7 +87,7 @@ kind: Machine
metadata:
labels:
cluster.x-k8s.io/cluster-name: talos-e2e-{{TAG}}-gcp
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/control-plane: 'true'
name: talos-e2e-{{TAG}}-gcp-controlplane-1
namespace: default
spec:
@ -112,14 +110,13 @@ metadata:
name: talos-e2e-{{TAG}}-gcp-controlplane-1
namespace: default
spec:
instanceType: n1-standard-2
instanceType: n1-standard-4
zone: us-central1-a
image: projects/talos-testbed/global/images/talos-e2e-{{TAG}}
serviceAccounts: {}
publicIP: true
rootDeviceSize: 100
rootDeviceSize: 150
---
## Controlplane 2 configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -136,7 +133,7 @@ kind: Machine
metadata:
labels:
cluster.x-k8s.io/cluster-name: talos-e2e-{{TAG}}-gcp
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/control-plane: 'true'
name: talos-e2e-{{TAG}}-gcp-controlplane-2
namespace: default
spec:
@ -159,14 +156,13 @@ metadata:
name: talos-e2e-{{TAG}}-gcp-controlplane-2
namespace: default
spec:
instanceType: n1-standard-2
instanceType: n1-standard-4
zone: us-central1-a
image: projects/talos-testbed/global/images/talos-e2e-{{TAG}}
serviceAccounts: {}
publicIP: true
rootDeviceSize: 100
rootDeviceSize: 150
---
## Worker deployment configs
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
@ -177,7 +173,7 @@ metadata:
spec:
template:
spec:
machineType: "join"
machineType: 'join'
---
apiVersion: cluster.x-k8s.io/v1alpha2
kind: MachineDeployment

View File

@ -1,32 +0,0 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e-runner.sh
# ## Run CIS conformance
# echo "Master CIS Conformance:"
# e2e_run "export KUBECONFIG=${KUBECONFIG}-${TALOS_PLATFORM}-capi
# kubectl apply -f /e2emanifests/cis-kube-bench-master.yaml
# kubectl wait --timeout=300s --for=condition=complete job/kube-bench-master > /dev/null
# kubectl logs job/kube-bench-master"
# echo "Worker CIS Conformance:"
# e2e_run "export KUBECONFIG=${KUBECONFIG}-${TALOS_PLATFORM}-capi
# kubectl apply -f /e2emanifests/cis-kube-bench-node.yaml
# kubectl wait --timeout=300s --for=condition=complete job/kube-bench-node > /dev/null
# kubectl logs job/kube-bench-node"
# Download sonobuoy and run kubernetes conformance
e2e_run "set -eou pipefail
apt-get update && apt-get install wget
wget --quiet -O /tmp/sonobuoy.tar.gz ${SONOBUOY_URL}
tar -xf /tmp/sonobuoy.tar.gz -C /usr/local/bin
sonobuoy run --kubeconfig ${KUBECONFIG} \
--wait \
--skip-preflight \
--plugin e2e \
--mode ${SONOBUOY_MODE}
results=\$(sonobuoy retrieve --kubeconfig ${KUBECONFIG})
sonobuoy e2e --kubeconfig ${KUBECONFIG} \$results
sonobuoy status --kubeconfig ${KUBECONFIG} --json | tee /tmp/status.json
if [ \$(cat /tmp/status.json | jq -r '.plugins[] | select(.plugin == \"e2e\") | .\"result-status\"') != 'passed' ]; then exit 1; fi"

hack/test/e2e-aws.sh (new executable file, 63 lines)

@ -0,0 +1,63 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e.sh
REGION="us-east-1"
BUCKET="talos-ci-e2e"
function setup {
# Setup svc account
mkdir -p ${TMP}
# Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/aws.tar.gz
# Upload Image
echo "uploading image to s3"
aws s3 cp --quiet ${TMP}/disk.raw s3://${BUCKET}/aws-${TAG}.raw
# Create snapshot from image
echo "importing snapshot from s3"
import_task_id=$(aws ec2 import-snapshot --region ${REGION} --description "talos e2e ${TAG}" --disk-container "Format=raw,UserBucket={S3Bucket=${BUCKET},S3Key=aws-${TAG}.raw}" | jq -r '.ImportTaskId')
echo ${import_task_id}
# Wait for import to complete
echo "waiting for snapshot import to complete"
snapshot_status=$(aws ec2 describe-import-snapshot-tasks --region ${REGION} --import-task-ids ${import_task_id} | \
jq -r --arg image_name "aws-${TAG}.raw" '.ImportSnapshotTasks[] | select(.SnapshotTaskDetail.UserBucket.S3Key == $image_name) | .SnapshotTaskDetail.Status')
while [ ${snapshot_status} != "completed" ]; do
sleep 5
snapshot_status=$(aws ec2 describe-import-snapshot-tasks --region ${REGION} --import-task-ids ${import_task_id} | \
jq -r --arg image_name "aws-${TAG}.raw" '.ImportSnapshotTasks[] | select(.SnapshotTaskDetail.UserBucket.S3Key == $image_name) | .SnapshotTaskDetail.Status')
done
snapshot_id=$(aws ec2 describe-import-snapshot-tasks --region ${REGION} --import-task-ids ${import_task_id} | \
jq -r --arg image_name "aws-${TAG}.raw" '.ImportSnapshotTasks[] | select(.SnapshotTaskDetail.UserBucket.S3Key == $image_name) | .SnapshotTaskDetail.SnapshotId')
echo ${snapshot_id}
# Create AMI
image_id=$(aws ec2 describe-images --region ${REGION} --filters="Name=name,Values=talos-e2e-${TAG}" | jq -r '.Images[0].ImageId') || true
if [[ ${image_id} != "null" ]]; then
aws ec2 deregister-image --region ${REGION} --image-id ${image_id}
fi
ami=$(aws ec2 register-image --region ${REGION} \
--block-device-mappings "DeviceName=/dev/xvda,VirtualName=talostest,Ebs={DeleteOnTermination=true,SnapshotId=${snapshot_id},VolumeSize=20,VolumeType=gp2}" \
--root-device-name /dev/xvda \
--virtualization-type hvm \
--architecture x86_64 \
--ena-support \
--name talos-e2e-${TAG} | jq -r '.ImageId')
# Setup the cluster YAML.
sed -e "s#{{REGION}}#${REGION}#g" \
-e "s/{{TAG}}/${SHA}/" \
-e "s#{{AMI}}#${ami}#g" ${PWD}/hack/test/capi/cluster-aws.yaml > ${TMP}/cluster.yaml
}
setup
create_cluster_capi aws
run_talos_integration_test
run_kubernetes_integration_test

hack/test/e2e-azure.sh (new executable file, 43 lines)

@ -0,0 +1,43 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e.sh
function setup {
AZURE_STORAGE_ACCOUNT=talostesting
AZURE_STORAGE_CONTAINER=talostesting
AZURE_GROUP=talos
# Setup svc acct vars
echo ${AZURE_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.json
AZURE_CLIENT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.clientId' )"
AZURE_CLIENT_SECRET="$( cat ${TMP}/svc-acct.json | jq -r '.clientSecret' )"
AZURE_TENANT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.tenantId' )"
# Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/azure.tar.gz
# Login to azure
az login --service-principal --username ${AZURE_CLIENT_ID} --password ${AZURE_CLIENT_SECRET} --tenant ${AZURE_TENANT_ID} > /dev/null
# Get connection string
AZURE_STORAGE_CONNECTION_STRING=$(az storage account show-connection-string -n ${AZURE_STORAGE_ACCOUNT} -g ${AZURE_GROUP} -o tsv)
# Push blob
AZURE_STORAGE_CONNECTION_STRING="${AZURE_STORAGE_CONNECTION_STRING}" az storage blob upload --container-name ${AZURE_STORAGE_CONTAINER} -f ${TMP}/disk.vhd -n azure-${TAG}.vhd
# Delete image
az image delete --name talos-e2e-${TAG} -g ${AZURE_GROUP}
# Create image
az image create --name talos-e2e-${TAG} --source https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_STORAGE_CONTAINER}/azure-${TAG}.vhd --os-type linux -g ${AZURE_GROUP}
# Setup the cluster YAML.
sed "s/{{TAG}}/${TAG}/" ${PWD}/hack/test/manifests/azure-cluster.yaml > ${TMP}/cluster.yaml
}
setup
create_cluster_capi azure
run_talos_integration_test
run_kubernetes_integration_test

hack/test/e2e-capi.sh (new executable file, 37 lines)

@ -0,0 +1,37 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e.sh
# We need to override this here since e2e.sh will set it to ${TMP}/capi/kubeconfig.
export KUBECONFIG="/tmp/e2e/docker/kubeconfig"
# CAPI
export CAPI_VERSION="0.2.6"
export CAPI_COMPONENTS="https://github.com/kubernetes-sigs/cluster-api/releases/download/v${CAPI_VERSION}/cluster-api-components.yaml"
# CABPT
export CABPT_NS="cabpt-system"
# Install envsubst
apk add --no-cache gettext
export AWS_B64ENCODED_CREDENTIALS=${AWS_SVC_ACCT}
cat ${PWD}/hack/test/capi/components-capa.yaml| envsubst | ${KUBECTL} apply -f -
export GCP_B64ENCODED_CREDENTIALS=${GCE_SVC_ACCT}
cat ${PWD}/hack/test/capi/components-capg.yaml| envsubst | ${KUBECTL} apply -f -
cat ${PWD}/hack/test/capi/components-provider.yaml | ${KUBECTL} apply -f -
${KUBECTL} apply -f ${CAPI_COMPONENTS}
# Wait for the talosconfig
timeout=$(($(date +%s) + ${TIMEOUT}))
until ${KUBECTL} wait --timeout=1s --for=condition=Ready -n ${CABPT_NS} pods --all; do
[[ $(date +%s) -gt $timeout ]] && exit 1
echo 'Waiting to CABPT pod to be available...'
sleep 5
done

hack/test/e2e-docker.sh (new executable file, 33 lines)

@ -0,0 +1,33 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e.sh
case "${CI:-false}" in
true)
ENDPOINT="docker"
;;
*)
ENDPOINT="127.0.0.1"
;;
esac
function create_cluster {
"${OSCTL}" cluster create \
--provisioner docker \
--image "${IMAGE}" \
--name e2e-docker \
--masters=3 \
--mtu 1500 \
--memory 2048 \
--cpus 4.0 \
--wait \
--endpoint "${ENDPOINT}"
}
create_cluster
get_kubeconfig
${KUBECTL} config set-cluster e2e-docker --server https://${ENDPOINT}:6443
run_talos_integration_test_docker
run_kubernetes_integration_test

hack/test/e2e-firecracker.sh (new executable file, 24 lines)

@ -0,0 +1,24 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e.sh
function create_cluster {
"${OSCTL}" cluster create \
--provisioner firecracker \
--name e2e-firecracker \
--masters=3 \
--mtu 1500 \
--memory 2048 \
--cpus 2.0 \
--cidr 172.20.0.0/24 \
--init-node-as-endpoint \
--wait \
--install-image docker.io/autonomy/installer:latest
}
create_cluster
get_kubeconfig
run_talos_integration_test
run_kubernetes_integration_test

hack/test/e2e-gcp.sh (new executable file, 19 lines)

@ -0,0 +1,19 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e.sh
function setup {
echo ${GCE_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.json
gcloud auth activate-service-account --key-file ${TMP}/svc-acct.json
gsutil cp ${ARTIFACTS}/gcp.tar.gz gs://talos-e2e/gcp-${SHA}.tar.gz
gcloud --quiet --project talos-testbed compute images delete talos-e2e-${SHA} || true
gcloud --quiet --project talos-testbed compute images create talos-e2e-${SHA} --source-uri gs://talos-e2e/gcp-${SHA}.tar.gz
sed -e "s/{{TAG}}/${SHA}/" ${PWD}/hack/test/capi/cluster-gcp.yaml > ${TMP}/cluster.yaml
}
setup
create_cluster_capi gcp
run_talos_integration_test
run_kubernetes_integration_test


@ -1,83 +0,0 @@
#!/bin/bash
set -eou pipefail
source ./hack/test/e2e-runner.sh
## Create tmp dir
mkdir -p ${TMPPLATFORM}
NAME_PREFIX="talos-e2e-${SHA}-${TALOS_PLATFORM}"
## Cleanup the platform resources upon any exit
cleanup() {
e2e_run "KUBECONFIG=${TMP}/docker/kubeconfig kubectl delete cluster ${NAME_PREFIX}"
}
trap cleanup EXIT
## Download kustomize and template out capi cluster, then deploy it
e2e_run "KUBECONFIG=${TMP}/docker/kubeconfig kubectl apply -f ${TMPPLATFORM}/cluster.yaml"
## Wait for talosconfig in cm then dump it out
e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until [ -n \"\${STATUS_TALOSCONFIG}\" ]; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
sleep 10
STATUS_TALOSCONFIG=\$( KUBECONFIG=${TMP}/docker/kubeconfig kubectl get talosconfig ${NAME_PREFIX}-controlplane-0 -o jsonpath='{.status.talosConfig}' )
done
echo \"\${STATUS_TALOSCONFIG}\" > ${TALOSCONFIG}"
## Wait until we have an IP for master 0
e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until [ -n \"\${MASTER_0_IP}\" ]; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
sleep 10
MASTER_0_IP=\$( KUBECONFIG=${TMP}/docker/kubeconfig kubectl get machine -o go-template --template='{{range .status.addresses}}{{if eq .type \"ExternalIP\"}}{{.address}}{{end}}{{end}}' ${NAME_PREFIX}-controlplane-0 )
done
echo \${MASTER_0_IP} > ${TMP}/master0ip"
## Target master 0 for osctl
e2e_run "MASTER_0_IP=\$( cat ${TMP}/master0ip )
/bin/osctl config endpoint \${MASTER_0_IP}"
## Wait for kubeconfig from capi master-0
e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until /bin/osctl kubeconfig ${TMPPLATFORM}; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
sleep 10
done"
## Wait for nodes to check in
e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until kubectl get nodes -o go-template='{{ len .items }}' | grep ${NUM_NODES} >/dev/null; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
kubectl get nodes -o wide
sleep 10
done"
## Wait for nodes ready
e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until kubectl wait --timeout=1s --for=condition=ready=true --all nodes > /dev/null; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
kubectl get nodes -o wide
sleep 10
done"
## Verify that we have an HA controlplane
e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until kubectl get nodes -l node-role.kubernetes.io/master='' -o go-template='{{ len .items }}' | grep 3 > /dev/null; do
[[ \$(date +%s) -gt \$timeout ]] && exit 1
kubectl get nodes -l node-role.kubernetes.io/master=''
sleep 10
done"
## Print nodes so we know everything is healthy
echo "E2E setup complete. List of nodes: "
e2e_run "kubectl get nodes -o wide"
## Run integration tests
e2e_run "integration-test -test.v"
## Run conformance tests
echo "Beginning conformance tests..."
./hack/test/conformance.sh


@ -1,48 +0,0 @@
export KUBERNETES_VERSION=v1.17.1
export TALOS_IMG="docker.io/autonomy/talos:${TAG}"
export TMP="/tmp/e2e"
export TMPPLATFORM="${TMP}/${TALOS_PLATFORM}"
export OSCTL="${PWD}/${ARTIFACTS}/osctl-linux-amd64"
export INTEGRATION_TEST="${PWD}/${ARTIFACTS}/integration-test-linux-amd64"
export TALOSCONFIG="${TMPPLATFORM}/talosconfig"
export KUBECONFIG="${TMPPLATFORM}/kubeconfig"
## Long timeout due to provisioning times
export TIMEOUT=9000
## Total number of nodes we'll be waiting to come up (3 Masters, 3 Workers)
export NUM_NODES=6
## ClusterAPI Bootstrap Provider Talos (CABPT)
export CABPT_VERSION="0.1.0-alpha.0"
export CABPT_COMPONENTS="https://github.com/talos-systems/cluster-api-bootstrap-provider-talos/releases/download/v${CABPT_VERSION}/provider-components.yaml"
## ClusterAPI (CAPI)
export CAPI_VERSION="0.2.6"
export CAPI_COMPONENTS="https://github.com/kubernetes-sigs/cluster-api/releases/download/v${CAPI_VERSION}/cluster-api-components.yaml"
## ClusterAPI Provider GCP (CAPG)
export CAPG_VERSION="0.2.0-alpha.2"
export CAPG_COMPONENTS="https://github.com/kubernetes-sigs/cluster-api-provider-gcp/releases/download/v${CAPG_VERSION}/infrastructure-components.yaml"
export KUSTOMIZE_VERSION="3.1.0"
export KUSTOMIZE_URL="https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64"
export SONOBUOY_VERSION="0.17.1"
export SONOBUOY_URL="https://github.com/heptio/sonobuoy/releases/download/v${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION}_linux_amd64.tar.gz"
export SONOBUOY_MODE=${SONOBUOY_MODE:-quick}
export CABPT_NS="cabpt-system"
e2e_run() {
docker run \
--rm \
--interactive \
--net=${DOCKER_NET} \
--entrypoint=/bin/bash \
--mount type=bind,source=${TMP},target=${TMP} \
--mount type=bind,source=${PWD}/hack/test/manifests,target=/e2emanifests \
-v ${OSCTL}:/bin/osctl:ro \
-v ${INTEGRATION_TEST}:/bin/integration-test:ro \
-e KUBECONFIG=${KUBECONFIG} \
-e TALOSCONFIG=${TALOSCONFIG} \
k8s.gcr.io/hyperkube:${KUBERNETES_VERSION} -c "${1}"
}

hack/test/e2e.sh (new executable file, 132 lines)

@ -0,0 +1,132 @@
# This file contains common environment variables and setup logic for all test
# scripts. It assumes that the following environment variables are set by the
# Makefile:
# - PLATFORM
# - TAG
# - SHA
# - ARTIFACTS
# - OSCTL
# - INTEGRATION_TEST
# - KUBECTL
# - SONOBUOY
set -eoux pipefail
TMP="/tmp/e2e/${PLATFORM}"
mkdir -p "${TMP}"
# Talos
export TALOSCONFIG="${TMP}/talosconfig"
# Kubernetes
export KUBECONFIG="${TMP}/kubeconfig"
# Sonobuoy
export SONOBUOY_MODE=${SONOBUOY_MODE:-quick}
export NAME_PREFIX="talos-e2e-${SHA}-${PLATFORM}"
export TIMEOUT=1200
export NUM_NODES=6
cleanup_capi() {
${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig delete cluster ${NAME_PREFIX}
}
# Create a cluster via CAPI.
function create_cluster_capi {
trap cleanup_capi EXIT
${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig apply -f ${TMP}/cluster.yaml
# Wait for talosconfig in cm then dump it out
timeout=$(($(date +%s) + ${TIMEOUT}))
until [ -n "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get talosconfig ${NAME_PREFIX}-controlplane-0 -o jsonpath='{.status.talosConfig}')" ]; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get talosconfig ${NAME_PREFIX}-controlplane-0 -o jsonpath='{.status.talosConfig}' > ${TALOSCONFIG}
# Wait until we have an IP for master 0
timeout=$(($(date +%s) + ${TIMEOUT}))
until [ -n "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -o go-template --template='{{range .status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' ${NAME_PREFIX}-controlplane-0)" ]; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
${OSCTL} config endpoint "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -o go-template --template='{{range .status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' ${NAME_PREFIX}-controlplane-0)"
# Wait for the kubeconfig from capi master-0
timeout=$(($(date +%s) + ${TIMEOUT}))
until get_kubeconfig; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
# Wait for nodes to check in
timeout=$(($(date +%s) + ${TIMEOUT}))
until ${KUBECTL} get nodes -o go-template='{{ len .items }}' | grep ${NUM_NODES} >/dev/null; do
[[ $(date +%s) -gt $timeout ]] && exit 1
${KUBECTL} get nodes -o wide && :
sleep 10
done
# Wait for nodes to be ready
timeout=$(($(date +%s) + ${TIMEOUT}))
until ${KUBECTL} wait --timeout=1s --for=condition=ready=true --all nodes > /dev/null; do
[[ $(date +%s) -gt $timeout ]] && exit 1
${KUBECTL} get nodes -o wide && :
sleep 10
done
# Verify that we have an HA controlplane
timeout=$(($(date +%s) + ${TIMEOUT}))
until ${KUBECTL} get nodes -l node-role.kubernetes.io/master='' -o go-template='{{ len .items }}' | grep 3 > /dev/null; do
[[ $(date +%s) -gt $timeout ]] && exit 1
${KUBECTL} get nodes -l node-role.kubernetes.io/master='' && :
sleep 10
done
}
function run_talos_integration_test {
"${INTEGRATION_TEST}" -test.v -talos.osctlpath "${OSCTL}"
}
function run_talos_integration_test_docker {
"${INTEGRATION_TEST}" -test.v -talos.osctlpath "${OSCTL}" -talos.k8sendpoint ${ENDPOINT}:6443
}
function run_kubernetes_integration_test {
${SONOBUOY} run \
--kubeconfig ${KUBECONFIG} \
--wait \
--skip-preflight \
--plugin e2e \
--mode ${SONOBUOY_MODE}
${SONOBUOY} status --kubeconfig ${KUBECONFIG} --json | jq . | tee ${TMP}/sonobuoy-status.json
if [ $(cat ${TMP}/sonobuoy-status.json | jq -r '.plugins[] | select(.plugin == "e2e") | ."result-status"') != 'passed' ]; then exit 1; fi
}
function run_control_plane_cis_benchmark {
${KUBECTL} apply -f ${PWD}/hack/test/cis/kube-bench-master.yaml
${KUBECTL} wait --timeout=300s --for=condition=complete job/kube-bench-master > /dev/null
${KUBECTL} logs job/kube-bench-master
}
function run_worker_cis_benchmark {
${KUBECTL} apply -f ${PWD}/hack/test/cis/kube-bench-node.yaml
${KUBECTL} wait --timeout=300s --for=condition=complete job/kube-bench-node > /dev/null
${KUBECTL} logs job/kube-bench-node
}
function get_kubeconfig {
"${OSCTL}" kubeconfig "${TMP}"
}
function dump_cluster_state {
nodes=$(${KUBECTL} get nodes -o jsonpath="{.items[*].status.addresses[?(@.type == 'InternalIP')].address}" | tr [:space:] ',')
"${OSCTL}" -n ${nodes} services
${KUBECTL} get nodes -o wide
${KUBECTL} get pods --all-namespaces -o wide
}


@ -1,22 +0,0 @@
#!/bin/bash
set -eou pipefail
TMP=/tmp/e2e/gcp
mkdir -p ${TMP}
## Setup svc acct
echo $GCE_SVC_ACCT | base64 -d > ${TMP}/svc-acct.json
gcloud auth activate-service-account --key-file ${TMP}/svc-acct.json
## Push talos-gcp to storage bucket
gsutil cp ${ARTIFACTS}/gcp.tar.gz gs://talos-e2e/gcp-${SHA}.tar.gz
## Create image from talos-gcp
gcloud --quiet --project talos-testbed compute images delete talos-e2e-${SHA} || true ##Ignore error if image doesn't exist
gcloud --quiet --project talos-testbed compute images create talos-e2e-${SHA} --source-uri gs://talos-e2e/gcp-${SHA}.tar.gz
## Setup the cluster YAML.
sed -e "s/{{TAG}}/${SHA}/" ${PWD}/hack/test/manifests/gcp-cluster.yaml > ${TMP}/cluster.yaml