diff --git a/hack/test/azure-setup.sh b/hack/test/azure-setup.sh
index 1606ee078..bf64d2e72 100755
--- a/hack/test/azure-setup.sh
+++ b/hack/test/azure-setup.sh
@@ -24,7 +24,7 @@ az login --service-principal --username ${CLIENT_ID} --password ${CLIENT_SECRET}
 AZURE_STORAGE_CONNECTION_STRING=$(az storage account show-connection-string -n ${STORAGE_ACCOUNT} -g ${GROUP} -o tsv)
 
 ## Push blob
-AZURE_STORAGE_CONNECTION_STRING='${AZURE_STORAGE_CONNECTION_STRING}' az storage blob upload --container-name ${STORAGE_CONTAINER} -f ${TMP}/azure.vhd -n azure-${TAG}.vhd
+AZURE_STORAGE_CONNECTION_STRING="${AZURE_STORAGE_CONNECTION_STRING}" az storage blob upload --container-name ${STORAGE_CONTAINER} -f ${TMP}/azure.vhd -n azure-${TAG}.vhd
 
 ## Delete image
 az image delete --name talos-e2e-${TAG} -g ${GROUP}
diff --git a/hack/test/capi.sh b/hack/test/capi.sh
index f6ab08530..2fa0f995a 100755
--- a/hack/test/capi.sh
+++ b/hack/test/capi.sh
@@ -1,6 +1,8 @@
 #!/bin/bash
 set -eou pipefail
 
+PLATFORM=""
+
 source ./hack/test/e2e-runner.sh
 
 ## Create tmp dir
@@ -16,7 +18,7 @@ e2e_run "kubectl apply -f ${TMP}/provider-components.yaml -f ${TMP}/capi-secrets
 ## Wait for talosconfig in cm then dump it out
 e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
          pod='pod/cluster-api-provider-talos-controller-manager-0'
-         until KUBECONFIG=${TMP}/kubeconfig kubectl wait --timeout=1s --for=condition=Ready -n ${CAPI_NS} ${pod}; do
+         until KUBECONFIG=${TMP}/kubeconfig kubectl wait --timeout=1s --for=condition=Ready -n ${CAPI_NS} \${pod}; do
            [[ \$(date +%s) -gt \$timeout ]] && exit 1
            echo 'Waiting to CAPT pod to be available...'
            sleep 10
diff --git a/hack/test/e2e-integration.sh b/hack/test/e2e-integration.sh
index 37bb2755c..cbbfaa6e0 100755
--- a/hack/test/e2e-integration.sh
+++ b/hack/test/e2e-integration.sh
@@ -10,10 +10,10 @@ NAME_PREFIX="talos-e2e-${TAG}-${PLATFORM}"
 
 ## Cleanup the platform resources upon any exit
 cleanup() {
-  e2e_run "kubectl delete machine ${NAME_PREFIX}-master-0 ${NAME_PREFIX}-master-1 ${NAME_PREFIX}-master-2
-           kubectl scale machinedeployment ${NAME_PREFIX}-workers --replicas=0
-           kubectl delete machinedeployment ${NAME_PREFIX}-workers
-           kubectl delete cluster ${NAME_PREFIX}"
+  e2e_run "KUBECONFIG=${TMP}/kubeconfig kubectl delete machine ${NAME_PREFIX}-master-0 ${NAME_PREFIX}-master-1 ${NAME_PREFIX}-master-2
+           KUBECONFIG=${TMP}/kubeconfig kubectl scale machinedeployment ${NAME_PREFIX}-workers --replicas=0
+           KUBECONFIG=${TMP}/kubeconfig kubectl delete machinedeployment ${NAME_PREFIX}-workers
+           KUBECONFIG=${TMP}/kubeconfig kubectl delete cluster ${NAME_PREFIX}"
 }
 
 trap cleanup EXIT
@@ -22,7 +22,7 @@ trap cleanup EXIT
 sed "s/{{TAG}}/${TAG}/" ${PWD}/hack/test/manifests/${PLATFORM}-cluster.yaml > ${TMPPLATFORM}/cluster.yaml
 
 ## Download kustomize and template out capi cluster, then deploy it
-e2e_run "kubectl apply -f ${TMPPLATFORM}/cluster.yaml"
+e2e_run "KUBECONFIG=${TMP}/kubeconfig kubectl apply -f ${TMPPLATFORM}/cluster.yaml"
 
 ## Wait for talosconfig in cm then dump it out
 e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
@@ -30,7 +30,7 @@ e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
           [[ \$(date +%s) -gt \$timeout ]] && exit 1
           sleep 10
         done
-  kubectl get cm -n ${CAPI_NS} ${NAME_PREFIX}-master-0 -o jsonpath='{.data.talosconfig}' > ${TALOSCONFIG}"
+  KUBECONFIG=${TMP}/kubeconfig kubectl get cm -n ${CAPI_NS} ${NAME_PREFIX}-master-0 -o jsonpath='{.data.talosconfig}' > ${TALOSCONFIG}"
 
 ## Wait for kubeconfig from capi master-0
 e2e_run "timeout=\$((\$(date +%s) + ${TIMEOUT}))