From 4b1b14034c66955f21a7c7fdf881ac0b71d7d673 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 5 Jul 2021 19:04:20 +0200 Subject: [PATCH 01/45] simplify cuda guide --- docs/usage/guides/cuda.md | 67 +++++++---------- docs/usage/guides/cuda/Dockerfile | 47 ++++++++++++ docs/usage/guides/cuda/Dockerfile.base | 32 --------- docs/usage/guides/cuda/Dockerfile.k3d-gpu | 72 ------------------- docs/usage/guides/cuda/build.sh | 39 ++++------ ...{gpu.yaml => device-plugin-daemonset.yaml} | 0 6 files changed, 88 insertions(+), 169 deletions(-) create mode 100644 docs/usage/guides/cuda/Dockerfile delete mode 100644 docs/usage/guides/cuda/Dockerfile.base delete mode 100644 docs/usage/guides/cuda/Dockerfile.k3d-gpu rename docs/usage/guides/cuda/{gpu.yaml => device-plugin-daemonset.yaml} (100%) diff --git a/docs/usage/guides/cuda.md b/docs/usage/guides/cuda.md index 5c8cabaa..677ad5ac 100644 --- a/docs/usage/guides/cuda.md +++ b/docs/usage/guides/cuda.md @@ -11,22 +11,15 @@ To get the NVIDIA container runtime in the K3s image you need to build your own The native K3s image is based on Alpine but the NVIDIA container runtime is not supported on Alpine yet. To get around this we need to build the image with a supported base image. -### Dockerfiles - -[Dockerfile.base](cuda/Dockerfile.base): +### Dockerfile + +[Dockerfile](cuda/Dockerfile): ```Dockerfile -{% include "cuda/Dockerfile.base" %} - -``` - -[Dockerfile.k3d-gpu](cuda/Dockerfile.k3d-gpu): - -```Dockerfile -{% include "cuda/Dockerfile.k3d-gpu" %} +{% include "cuda/Dockerfile" %} ``` -These Dockerfiles are based on the [K3s Dockerfile](https://github.com/rancher/k3s/blob/master/package/Dockerfile) +This Dockerfile is based on the [K3s Dockerfile](https://github.com/rancher/k3s/blob/master/package/Dockerfile) The following changes are applied: 1. Change the base images to nvidia/cuda:11.2.0-base-ubuntu18.04 so the NVIDIA Container Runtime can be installed. 
The version of `cuda:xx.x.x` must match the one you're planning to use. @@ -50,7 +43,7 @@ To enable NVIDIA GPU support on Kubernetes you also need to install the [NVIDIA * Run GPU enabled containers in your Kubernetes cluster. ```yaml -{% include "cuda/gpu.yaml" %} +{% include "cuda/device-plugin-daemonset.yaml" %} ``` ### Build the K3s image @@ -59,20 +52,13 @@ To build the custom image we need to build K3s because we need the generated out Put the following files in a directory: -* [Dockerfile.base](cuda/Dockerfile.base) -* [Dockerfile.k3d-gpu](cuda/Dockerfile.k3d-gpu) +* [Dockerfile](cuda/Dockerfile) * [config.toml.tmpl](cuda/config.toml.tmpl) -* [gpu.yaml](cuda/gpu.yaml) +* [device-plugin-daemonset.yaml](cuda/device-plugin-daemonset.yaml) * [build.sh](cuda/build.sh) * [cuda-vector-add.yaml](cuda/cuda-vector-add.yaml) -The `build.sh` script is configured using exports & defaults to `v1.21.2+k3s1`. Please set your CI_REGISTRY_IMAGE! The script performs the following steps: - -* pulls K3s -* builds K3s -* build the custom K3D Docker image - -The resulting image is tagged as k3s-gpu:<version tag>. The version tag is the git tag but the '+' sign is replaced with a '-'. +The `build.sh` script is configured using exports & defaults to `v1.21.2+k3s1`. Please set at least the `IMAGE_REGISTRY` variable! The script builds the custom K3s image including the NVIDIA drivers. [build.sh](cuda/build.sh): @@ -80,28 +66,12 @@ -The resulting image is tagged as k3s-gpu:<version tag>. 
The version tag is {% include "cuda/build.sh" %} ``` -## Run and test the custom image with Docker - -You can run a container based on the new image with Docker: - -```bash -docker run --name k3s-gpu -d --privileged --gpus all $CI_REGISTRY_IMAGE:$IMAGE_TAG -``` - -Deploy a [test pod](cuda/cuda-vector-add.yaml): - -```bash -docker cp cuda-vector-add.yaml k3s-gpu:/cuda-vector-add.yaml -docker exec k3s-gpu kubectl apply -f /cuda-vector-add.yaml -docker exec k3s-gpu kubectl logs cuda-vector-add -``` - ## Run and test the custom image with k3d -Tou can use the image with k3d: +You can use the image with k3d: ```bash -k3d cluster create local --image=$CI_REGISTRY_IMAGE:$IMAGE_TAG --gpus=1 +k3d cluster create gputest --image=$IMAGE --gpus=1 ``` Deploy a [test pod](cuda/cuda-vector-add.yaml): @@ -111,6 +81,21 @@ kubectl apply -f cuda-vector-add.yaml kubectl logs cuda-vector-add ``` +This should output something like the following: + +```bash +$ kubectl logs cuda-vector-add + +[Vector addition of 50000 elements] +Copy input data from the host memory to the CUDA device +CUDA kernel launch with 196 blocks of 256 threads +Copy output data from the CUDA device to the host memory +Test PASSED +Done +``` + +If the `cuda-vector-add` pod is stuck in `Pending` state, probably the device-driver daemonset didn't get deployed correctly from the auto-deploy manifests. In that case, you can apply it manually via `#!bash kubectl apply -f device-plugin-daemonset.yaml`. + ## Known issues * This approach does not work on WSL2 yet. The NVIDIA driver plugin and container runtime rely on the NVIDIA Management Library (NVML) which is not yet supported. See the [CUDA on WSL User Guide](https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations). 
diff --git a/docs/usage/guides/cuda/Dockerfile b/docs/usage/guides/cuda/Dockerfile new file mode 100644 index 00000000..d17e8da0 --- /dev/null +++ b/docs/usage/guides/cuda/Dockerfile @@ -0,0 +1,47 @@ +ARG K3S_TAG="v1.21.2-k3s1" +FROM rancher/k3s:$K3S_TAG as k3s + +FROM nvidia/cuda:11.2.0-base-ubuntu18.04 + +ARG NVIDIA_CONTAINER_RUNTIME_VERSION +ENV NVIDIA_CONTAINER_RUNTIME_VERSION=$NVIDIA_CONTAINER_RUNTIME_VERSION + +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +RUN apt-get update && \ + apt-get -y install gnupg2 curl + +# Install NVIDIA Container Runtime +RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | apt-key add - + +RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/ubuntu18.04/nvidia-container-runtime.list | tee /etc/apt/sources.list.d/nvidia-container-runtime.list + +RUN apt-get update && \ + apt-get -y install nvidia-container-runtime=${NVIDIA_CONTAINER_RUNTIME_VERSION} + +COPY --from=k3s / / + +RUN mkdir -p /etc && \ + echo 'hosts: files dns' > /etc/nsswitch.conf + +RUN chmod 1777 /tmp + +# Provide custom containerd configuration to configure the nvidia-container-runtime +RUN mkdir -p /var/lib/rancher/k3s/agent/etc/containerd/ + +COPY config.toml.tmpl /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl + +# Deploy the nvidia driver plugin on startup +RUN mkdir -p /var/lib/rancher/k3s/server/manifests + +COPY device-plugin-daemonset.yaml /var/lib/rancher/k3s/server/manifests/nvidia-device-plugin-daemonset.yaml + +VOLUME /var/lib/kubelet +VOLUME /var/lib/rancher/k3s +VOLUME /var/lib/cni +VOLUME /var/log + +ENV PATH="$PATH:/bin/aux" + +ENTRYPOINT ["/bin/k3s"] +CMD ["agent"] \ No newline at end of file diff --git a/docs/usage/guides/cuda/Dockerfile.base b/docs/usage/guides/cuda/Dockerfile.base deleted file mode 100644 index 708c8735..00000000 --- a/docs/usage/guides/cuda/Dockerfile.base +++ /dev/null @@ -1,32 +0,0 @@ -FROM nvidia/cuda:11.2.0-base-ubuntu18.04 - -ENV 
DEBIAN_FRONTEND noninteractive - -ARG DOCKER_VERSION -ENV DOCKER_VERSION=$DOCKER_VERSION - -RUN set -x && \ - apt-get update && \ - apt-get install -y \ - apt-transport-https \ - ca-certificates \ - curl \ - wget \ - tar \ - zstd \ - gnupg \ - lsb-release \ - git \ - software-properties-common \ - build-essential && \ - rm -rf /var/lib/apt/lists/* - -RUN set -x && \ - curl -fsSL https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]')/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg && \ - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null && \ - apt-get update && \ - apt-get install -y \ - containerd.io \ - docker-ce=5:$DOCKER_VERSION~3-0~$(lsb_release -is | tr '[:upper:]' '[:lower:]')-$(lsb_release -cs) \ - docker-ce-cli=5:$DOCKER_VERSION~3-0~$(lsb_release -is | tr '[:upper:]' '[:lower:]')-$(lsb_release -cs) && \ - rm -rf /var/lib/apt/lists/* \ No newline at end of file diff --git a/docs/usage/guides/cuda/Dockerfile.k3d-gpu b/docs/usage/guides/cuda/Dockerfile.k3d-gpu deleted file mode 100644 index c78cbc5f..00000000 --- a/docs/usage/guides/cuda/Dockerfile.k3d-gpu +++ /dev/null @@ -1,72 +0,0 @@ -FROM nvidia/cuda:11.2.0-base-ubuntu18.04 as base - -RUN set -x && \ - apt-get update && \ - apt-get install -y ca-certificates zstd - -COPY k3s/build/out/data.tar.zst / - -RUN set -x && \ - mkdir -p /image/etc/ssl/certs /image/run /image/var/run /image/tmp /image/lib/modules /image/lib/firmware && \ - tar -I zstd -xf /data.tar.zst -C /image && \ - cp /etc/ssl/certs/ca-certificates.crt /image/etc/ssl/certs/ca-certificates.crt - -RUN set -x && \ - cd image/bin && \ - rm -f k3s && \ - ln -s k3s-server k3s - -FROM nvidia/cuda:11.2.0-base-ubuntu18.04 - -ARG NVIDIA_CONTAINER_RUNTIME_VERSION -ENV 
NVIDIA_CONTAINER_RUNTIME_VERSION=$NVIDIA_CONTAINER_RUNTIME_VERSION - -RUN set -x && \ - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections - -RUN set -x && \ - apt-get update && \ - apt-get -y install gnupg2 curl - -# Install NVIDIA Container Runtime -RUN set -x && \ - curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | apt-key add - - -RUN set -x && \ - curl -s -L https://nvidia.github.io/nvidia-container-runtime/ubuntu18.04/nvidia-container-runtime.list | tee /etc/apt/sources.list.d/nvidia-container-runtime.list - -RUN set -x && \ - apt-get update && \ - apt-get -y install nvidia-container-runtime=${NVIDIA_CONTAINER_RUNTIME_VERSION} - - -COPY --from=base /image / - -RUN set -x && \ - mkdir -p /etc && \ - echo 'hosts: files dns' > /etc/nsswitch.conf - -RUN set -x && \ - chmod 1777 /tmp - -# Provide custom containerd configuration to configure the nvidia-container-runtime -RUN set -x && \ - mkdir -p /var/lib/rancher/k3s/agent/etc/containerd/ - -COPY config.toml.tmpl /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl - -# Deploy the nvidia driver plugin on startup -RUN set -x && \ - mkdir -p /var/lib/rancher/k3s/server/manifests - -COPY gpu.yaml /var/lib/rancher/k3s/server/manifests/gpu.yaml - -VOLUME /var/lib/kubelet -VOLUME /var/lib/rancher/k3s -VOLUME /var/lib/cni -VOLUME /var/log - -ENV PATH="$PATH:/bin/aux" - -ENTRYPOINT ["/bin/k3s"] -CMD ["agent"] \ No newline at end of file diff --git a/docs/usage/guides/cuda/build.sh b/docs/usage/guides/cuda/build.sh index b6288a5c..562601dc 100755 --- a/docs/usage/guides/cuda/build.sh +++ b/docs/usage/guides/cuda/build.sh @@ -1,30 +1,21 @@ #!/bin/bash -export CI_REGISTRY_IMAGE="YOUR_REGISTRY_IMAGE_URL" -export VERSION="1.0" -export K3S_TAG="v1.21.2+k3s1" -export DOCKER_VERSION="20.10.7" -export IMAGE_TAG="v1.21.2-k3s1" -export NVIDIA_CONTAINER_RUNTIME_VERSION="3.5.0-1" +set -euxo pipefail -docker build -f Dockerfile.base --build-arg DOCKER_VERSION=$DOCKER_VERSION -t 
$CI_REGISTRY_IMAGE/base:$VERSION . && \ -docker push $CI_REGISTRY_IMAGE/base:$VERSION +K3S_TAG=${K3S_TAG:="v1.21.2-k3s1"} # replace + with -, if needed +IMAGE_REGISTRY=${IMAGE_REGISTRY:="MY_REGISTRY"} +IMAGE_REPOSITORY=${IMAGE_REPOSITORY:="rancher/k3s"} +IMAGE_TAG="$K3S_TAG-cuda" +IMAGE=${IMAGE:="$IMAGE_REGISTRY/$IMAGE_REPOSITORY:$IMAGE_TAG"} -rm -rf ./k3s && \ -git clone --depth 1 https://github.com/rancher/k3s.git -b "$K3S_TAG" && \ -docker run -ti -v ${PWD}/k3s:/k3s -v /var/run/docker.sock:/var/run/docker.sock $CI_REGISTRY_IMAGE/base:1.0 sh -c "cd /k3s && make" && \ -ls -al k3s/build/out/data.tar.zst +NVIDIA_CONTAINER_RUNTIME_VERSION=${NVIDIA_CONTAINER_RUNTIME_VERSION:="3.5.0-1"} -if [ -f k3s/build/out/data.tar.zst ]; then - echo "File exists! Building!" - docker build -f Dockerfile.k3d-gpu \ - --build-arg NVIDIA_CONTAINER_RUNTIME_VERSION=$NVIDIA_CONTAINER_RUNTIME_VERSION\ - -t $CI_REGISTRY_IMAGE:$IMAGE_TAG . && \ - docker push $CI_REGISTRY_IMAGE:$IMAGE_TAG - echo "Done!" -else - echo "Error, file does not exist!" - exit 1 -fi +echo "IMAGE=$IMAGE" -docker build -t $CI_REGISTRY_IMAGE:$IMAGE_TAG . \ No newline at end of file +# due to some unknown reason, copying symlinks fails with buildkit enabled +DOCKER_BUILDKIT=0 docker build \ + --build-arg K3S_TAG=$K3S_TAG \ + --build-arg NVIDIA_CONTAINER_RUNTIME_VERSION=$NVIDIA_CONTAINER_RUNTIME_VERSION \ + -t $IMAGE . +docker push $IMAGE +echo "Done!" 
\ No newline at end of file diff --git a/docs/usage/guides/cuda/gpu.yaml b/docs/usage/guides/cuda/device-plugin-daemonset.yaml similarity index 100% rename from docs/usage/guides/cuda/gpu.yaml rename to docs/usage/guides/cuda/device-plugin-daemonset.yaml From c5f7884f787018e6538a68a790a670201e14e7a0 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Fri, 9 Jul 2021 08:22:05 +0200 Subject: [PATCH 02/45] [ENHANCEMENT] imageImport: start existing tools node before using it and add option to keep it around (#672) --- cmd/image/imageImport.go | 1 + pkg/tools/tools.go | 27 +++++++++++++++++---------- pkg/types/types.go | 3 ++- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/cmd/image/imageImport.go b/cmd/image/imageImport.go index f2641be9..ff34345f 100644 --- a/cmd/image/imageImport.go +++ b/cmd/image/imageImport.go @@ -86,6 +86,7 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of } cmd.Flags().BoolVarP(&loadImageOpts.KeepTar, "keep-tarball", "k", false, "Do not delete the tarball containing the saved images from the shared volume") + cmd.Flags().BoolVarP(&loadImageOpts.KeepToolsNode, "keep-tools", "t", false, "Do not delete the tools node after import") /* Subcommands */ diff --git a/pkg/tools/tools.go b/pkg/tools/tools.go index 8cf728a0..56866ed3 100644 --- a/pkg/tools/tools.go +++ b/pkg/tools/tools.go @@ -40,7 +40,7 @@ import ( // ImageImportIntoClusterMulti starts up a k3d tools container for the selected cluster and uses it to export // images from the runtime to import them into the nodes of the selected cluster -func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, loadImageOpts k3d.ImageImportOpts) error { +func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, opts k3d.ImageImportOpts) error { imagesFromRuntime, imagesFromTar, err := findImages(ctx, runtime, images) if err != nil { 
return err @@ -80,8 +80,8 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, var toolsNode *k3d.Node toolsNode, err = runtime.GetNode(ctx, &k3d.Node{Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name)}) if err != nil || toolsNode == nil { - log.Infoln("Starting k3d-tools node...") - toolsNode, err = startToolsNode( // TODO: re-use existing container + log.Infoln("Starting new tools node...") + toolsNode, err = runToolsNode( // TODO: re-use existing container ctx, runtime, cluster, @@ -91,7 +91,12 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()), }) if err != nil { - log.Errorf("Failed to start tools container for cluster '%s'", cluster.Name) + log.Errorf("Failed to run tools container for cluster '%s'", cluster.Name) + } + } else if !toolsNode.State.Running { + log.Infof("Starting existing tools node %s...", toolsNode.Name) + if err := runtime.StartNode(ctx, toolsNode); err != nil { + return fmt.Errorf("error starting existing tools node %s: %v", toolsNode.Name, err) } } @@ -151,7 +156,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, importWaitgroup.Wait() // remove tarball - if !loadImageOpts.KeepTar && len(importTarNames) > 0 { + if !opts.KeepTar && len(importTarNames) > 0 { log.Infoln("Removing the tarball(s) from image volume...") if err := runtime.ExecInNode(ctx, toolsNode, []string{"rm", "-f", strings.Join(importTarNames, " ")}); err != nil { log.Errorf("Failed to delete one or more tarballs from '%+v'", importTarNames) @@ -160,9 +165,11 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, } // delete tools container - log.Infoln("Removing k3d-tools node...") - if err := runtime.DeleteNode(ctx, toolsNode); err != nil { - log.Errorf("Failed to delete tools node '%s': Try to delete it manually", toolsNode.Name) + if !opts.KeepToolsNode 
{ + log.Infoln("Removing k3d-tools node...") + if err := runtime.DeleteNode(ctx, toolsNode); err != nil { + log.Errorf("Failed to delete tools node '%s': Try to delete it manually", toolsNode.Name) + } } log.Infoln("Successfully imported image(s)") @@ -266,8 +273,8 @@ func containsVersionPart(imageTag string) bool { return strings.Contains(substringAfterSlash, ":") } -// startToolsNode will start a new k3d tools container and connect it to the network of the chosen cluster -func startToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, network string, volumes []string) (*k3d.Node, error) { +// runToolsNode will start a new k3d tools container and connect it to the network of the chosen cluster +func runToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, network string, volumes []string) (*k3d.Node, error) { labels := map[string]string{} for k, v := range k3d.DefaultObjectLabels { labels[k] = v diff --git a/pkg/types/types.go b/pkg/types/types.go index 832146e9..71da6d4d 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -253,7 +253,8 @@ type NodeHookAction interface { // ImageImportOpts describes a set of options one can set for loading image(s) into cluster(s) type ImageImportOpts struct { - KeepTar bool + KeepTar bool + KeepToolsNode bool } type IPAM struct { From a464bcdff227c3d0335b32da4eca692ca003a0bc Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 9 Jul 2021 08:34:59 +0200 Subject: [PATCH 03/45] v4.4.7: update changelog + docs --- CHANGELOG.md | 20 ++++ docgen/go.mod | 15 +-- docgen/go.sum | 120 +++++++++++++++++----- docs/usage/commands/k3d_cluster_create.md | 4 +- docs/usage/commands/k3d_image_import.md | 4 + 5 files changed, 123 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86cb9a96..6f9381cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## v4.4.7 + +### Features / Enhancements + +- new flag: `k3d image import --keep-tools` to not 
delete the tools node container after importing the image(s) (#672) +- improve image name handling when importing images (#653, @cimnine) + - normalize image names internally, e.g. strip prefixes that docker adds, but that break the process + - see issue #653 for more info + +### Fixes + +- Use default gateway, when bridge network doesn't have it (#666, @kuritka) +- Start an existing, but not running tools node to re-use it when importing an image (#672) + +### Misc + +- deps: switching back to upstream viper including the StringArray fix +- docs: reference to "nolar/setup-k3d-k3s" step for GitHub Actions (#668, @nolar) +- docs: updated and simplified CUDA guide (#662, @vainkop) (#669) + ## v4.4.6 ### Fixes diff --git a/docgen/go.mod b/docgen/go.mod index 8b20fcae..c321239c 100644 --- a/docgen/go.mod +++ b/docgen/go.mod @@ -7,26 +7,13 @@ require ( github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb // indirect github.com/containerd/containerd v1.5.0-rc.1 // indirect github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5 // indirect - github.com/docker/cli v20.10.6+incompatible // indirect - github.com/docker/docker v20.10.6+incompatible // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect - github.com/magiconair/properties v1.8.5 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect - github.com/pelletier/go-toml v1.9.0 // indirect - github.com/rancher/k3d/v4 v4.4.1 - github.com/spf13/afero v1.6.0 // indirect + github.com/rancher/k3d/v4 v4.4.7-0.20210709062205-c5f7884f7870 github.com/spf13/cobra v1.1.3 golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 // indirect golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 // indirect - golang.org/x/sys v0.0.0-20210414055047-fe65e336abe0 // indirect golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 // indirect - google.golang.org/appengine v1.6.7 // indirect - 
google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3 // indirect - google.golang.org/grpc v1.37.0 // indirect - gopkg.in/ini.v1 v1.62.0 // indirect - k8s.io/client-go v0.21.0 // indirect k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.1.1 // indirect ) diff --git a/docgen/go.sum b/docgen/go.sum index 4cb02b63..b7f7d914 100644 --- a/docgen/go.sum +++ b/docgen/go.sum @@ -14,6 +14,11 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -80,6 +85,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -93,6 +99,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -114,6 +121,7 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs 
v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= @@ -206,6 +214,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -227,7 +236,6 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v20.10.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.6+incompatible h1:LAyI6Lnwv+AUjtp2ZyN1lxqXBtkeFUqm4H7CZMWZuP8= github.com/docker/cli v20.10.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -235,7 +243,6 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -253,13 +260,14 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvyukov/go-fuzz v0.0.0-20201127111758-49e582c6c23d/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -318,6 +326,7 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -343,6 +352,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -358,6 +368,7 @@ github.com/golang/protobuf 
v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= @@ -397,6 +408,7 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -404,6 +416,10 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -427,6 +443,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -455,6 +472,7 @@ github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5 h1:6 github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5/go.mod h1:Yho0S7KhsnHQRCC5lDraYF1SsLMeWtf/tKdufKu3TJA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -469,8 +487,9 @@ github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -608,10 +627,9 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= 
-github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0= -github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -653,15 +671,14 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/rancher/k3d/v4 v4.4.1 h1:GW6obnThywS7tX3SV1IwN/RTMiDP+e8gQCkZWWclPPw= -github.com/rancher/k3d/v4 v4.4.1/go.mod h1:kgiNgM6HO/yYo3puVtJZZtJ8mSpYvbcoaut4sSj1RNo= +github.com/rancher/k3d/v4 v4.4.7-0.20210709062205-c5f7884f7870 h1:G+QYoXAR02hyJiPv4GnxWFBI92/HkA65QRVi+SCNgmk= +github.com/rancher/k3d/v4 v4.4.7-0.20210709062205-c5f7884f7870/go.mod h1:Cr4a6z5rTg/C+JwbT7OtWQedzswBlfRfLljYnbesoGE= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 
v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -720,8 +737,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -776,6 +793,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -783,18 +801,25 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go4.org/intern v0.0.0-20210108033219-3eb7198706b2 h1:VFTf+jjIgsldaz/Mr00VaCSswHJrI2hIjQygE/W4IMg= go4.org/intern v0.0.0-20210108033219-3eb7198706b2/go.mod h1:vLqJ+12kCw61iCWsPto0EOHhBS+o4rO5VIucbc9g2Cc= go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222175341-b30ae309168e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= @@ -840,6 +865,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -848,6 +874,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -890,11 +919,15 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 h1:4qWs8cYYH6PoEFy4dfhDFgoMGkwAcETd+MmPdCPMzUc= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -902,6 +935,13 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 h1:rPRtHfUb0UKZeZ6GH4K4Nt4YRbE9V1u+QZX5upZXqJQ= golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -974,6 +1014,7 @@ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -983,16 +1024,20 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210414055047-fe65e336abe0 h1:g9s1Ppvvun/fI+BptTMj909BBIcGrzQ32k9FNlcevOE= -golang.org/x/sys v0.0.0-20210414055047-fe65e336abe0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1069,8 +1114,14 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1093,6 +1144,12 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine 
v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1127,16 +1184,26 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3 h1:K+7Ig5hjiLVA/i1UFUUbCGimWz5/Ey0lAQjT3QiLaPY= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1153,11 +1220,15 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod 
h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1186,7 +1257,6 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.58.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -1198,14 +1268,16 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1218,8 +1290,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -inet.af/netaddr v0.0.0-20210403172118-1e1430f727e0 h1:ANl7piXB3SHmhwTNeTO0yl0yf4gO3/aaFjcBCdH9Ftg= -inet.af/netaddr v0.0.0-20210403172118-1e1430f727e0/go.mod h1:I2i9ONCXRZDnG1+7O8fSuYzjcPxHQXrIfzD/IkR87x4= +inet.af/netaddr v0.0.0-20210421205553-78c777480f22 
h1:TX8hopxzHycFVkIsvu6DSpCWUCqDqOvyyPj/5IK1fUQ= +inet.af/netaddr v0.0.0-20210421205553-78c777480f22/go.mod h1:z0nx+Dh+7N7CC8V5ayHtHGpZpxLQZZxkIaaz6HN65Ls= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= diff --git a/docs/usage/commands/k3d_cluster_create.md b/docs/usage/commands/k3d_cluster_create.md index ff4ea16b..7ef87cfb 100644 --- a/docs/usage/commands/k3d_cluster_create.md +++ b/docs/usage/commands/k3d_cluster_create.md @@ -25,7 +25,7 @@ k3d cluster create NAME [flags] - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550` -c, --config string Path of a config file to use -e, --env KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add environment variables to nodes (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com" -e "SOME_KEY=SOME_VAL@server[0]"` + - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com@server[0]" -e "SOME_KEY=SOME_VAL@server[0]"` --gpus string GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker] -h, --help help for create -i, --image string Specify k3s image that you want to use for the nodes @@ -34,7 +34,7 @@ k3d cluster create NAME [flags] --kubeconfig-switch-context Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true) --kubeconfig-update-default Directly update the default kubeconfig with the new cluster's context (default true) -l, --label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to node container (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -v "other.label=somevalue@server[0]"` + - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -l 
"other.label=somevalue@server[0]"` --network string Join an existing network --no-hostip Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS --no-image-volume Disable the creation of a volume for importing images diff --git a/docs/usage/commands/k3d_image_import.md b/docs/usage/commands/k3d_image_import.md index cb535dbe..c11dada6 100644 --- a/docs/usage/commands/k3d_image_import.md +++ b/docs/usage/commands/k3d_image_import.md @@ -9,6 +9,9 @@ Import image(s) from docker into k3d cluster(s). If an IMAGE starts with the prefix 'docker.io/', then this prefix is stripped internally. That is, 'docker.io/rancher/k3d-tools:latest' is treated as 'rancher/k3d-tools:latest'. +If an IMAGE starts with the prefix 'library/' (or 'docker.io/library/'), then this prefix is stripped internally. +That is, 'library/busybox:latest' (or 'docker.io/library/busybox:latest') are treated as 'busybox:latest'. + If an IMAGE does not have a version tag, then ':latest' is assumed. That is, 'rancher/k3d-tools' is treated as 'rancher/k3d-tools:latest'. @@ -25,6 +28,7 @@ k3d image import [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]] [flags] -c, --cluster stringArray Select clusters to load the image to. (default [k3s-default]) -h, --help help for import -k, --keep-tarball Do not delete the tarball containing the saved images from the shared volume + -t, --keep-tools Do not delete the tools node after import ``` ### Options inherited from parent commands From 296f24c9b76f1ae118dd3cd7828c95e4497a2170 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 14 May 2021 13:55:11 +0200 Subject: [PATCH 04/45] Revert 'Revert "add --node-label flag for node create command (#584, @developer-guy, @ejose, @dentrax)"' This reverts commit 70872648b345af93380653a357cc5c215614fb7c. 
--- cmd/node/nodeCreate.go | 23 +++++++++++++++++++++-- pkg/client/node.go | 5 +++++ pkg/types/types.go | 39 ++++++++++++++++++++------------------- 3 files changed, 46 insertions(+), 21 deletions(-) diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index 52f255d5..cc7c2b10 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -23,6 +23,7 @@ package node import ( "fmt" + "strings" "time" "github.com/spf13/cobra" @@ -73,6 +74,8 @@ func NewCmdNodeCreate() *cobra.Command { cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for the node(s) to be ready before returning.") cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.") + cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"") + // done return cmd } @@ -124,6 +127,21 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl log.Errorf("Provided memory limit value is invalid") } + k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label") + if err != nil { + log.Errorln("No node-label specified") + log.Fatalln(err) + } + + k3sNodeLabels := make(map[string]string, len(k3sNodeLabelsFlag)) + for _, label := range k3sNodeLabelsFlag { + labelSplitted := strings.Split(label, "=") + if len(labelSplitted) != 2 { + log.Fatalf("unknown label format format: %s, use format \"foo=bar\"", label) + } + k3sNodeLabels[labelSplitted[0]] = labelSplitted[1] + } + // generate list of nodes nodes := []*k3d.Node{} for i := 0; i < replicas; i++ { @@ -134,8 +152,9 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl Labels: map[string]string{ k3d.LabelRole: roleStr, }, - Restart: true, - Memory: memory, + K3sNodeLabels: k3sNodeLabels, + Restart: true, + Memory: memory, } nodes = append(nodes, node) } diff --git a/pkg/client/node.go b/pkg/client/node.go index c59dbc7b..ebbf28f0 100644 --- 
a/pkg/client/node.go +++ b/pkg/client/node.go @@ -492,6 +492,11 @@ func patchAgentSpec(node *k3d.Node) error { if node.Cmd == nil { node.Cmd = []string{"agent"} } + + for k, v := range node.K3sNodeLabels { + node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) + } + return nil } diff --git a/pkg/types/types.go b/pkg/types/types.go index 71da6d4d..31675511 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -331,25 +331,26 @@ type NodeIP struct { // Node describes a k3d node type Node struct { - Name string `yaml:"name" json:"name,omitempty"` - Role Role `yaml:"role" json:"role,omitempty"` - Image string `yaml:"image" json:"image,omitempty"` - Volumes []string `yaml:"volumes" json:"volumes,omitempty"` - Env []string `yaml:"env" json:"env,omitempty"` - Cmd []string // filled automatically based on role - Args []string `yaml:"extraArgs" json:"extraArgs,omitempty"` - Ports nat.PortMap `yaml:"portMappings" json:"portMappings,omitempty"` - Restart bool `yaml:"restart" json:"restart,omitempty"` - Created string `yaml:"created" json:"created,omitempty"` - Labels map[string]string // filled automatically - Networks []string // filled automatically - ExtraHosts []string // filled automatically - ServerOpts ServerOpts `yaml:"serverOpts" json:"serverOpts,omitempty"` - AgentOpts AgentOpts `yaml:"agentOpts" json:"agentOpts,omitempty"` - GPURequest string // filled automatically - Memory string // filled automatically - State NodeState // filled automatically - IP NodeIP // filled automatically + Name string `yaml:"name" json:"name,omitempty"` + Role Role `yaml:"role" json:"role,omitempty"` + Image string `yaml:"image" json:"image,omitempty"` + Volumes []string `yaml:"volumes" json:"volumes,omitempty"` + Env []string `yaml:"env" json:"env,omitempty"` + Cmd []string // filled automatically based on role + Args []string `yaml:"extraArgs" json:"extraArgs,omitempty"` + Ports nat.PortMap `yaml:"portMappings" json:"portMappings,omitempty"` + Restart bool 
`yaml:"restart" json:"restart,omitempty"` + Created string `yaml:"created" json:"created,omitempty"` + Labels map[string]string // filled automatically + K3sNodeLabels map[string]string `yaml:"k3sNodeLabels" json:"k3sNodeLabels,omitempty"` + Networks []string // filled automatically + ExtraHosts []string // filled automatically + ServerOpts ServerOpts `yaml:"serverOpts" json:"serverOpts,omitempty"` + AgentOpts AgentOpts `yaml:"agentOpts" json:"agentOpts,omitempty"` + GPURequest string // filled automatically + Memory string // filled automatically + State NodeState // filled automatically + IP NodeIP // filled automatically } // ServerOpts describes some additional server role specific opts From 261ac0faf4c51f68ae24bb723bd44fa91b056555 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Fri, 14 May 2021 14:01:25 +0200 Subject: [PATCH 05/45] [BREAKING] Config File Enhancements: v1alpha3, migrations, generic k3s-args (#605) Excerpt: - new version v1alpha3 with k3s extraArgs using node filters - reflected in CLI via --k3s-arg - new migration option to migrate (internally and via cli) from v1alpha2 to v1alpha3 - enhancements to how config files are being read - e2e tests for config file migration --- cmd/cluster/clusterCreate.go | 86 ++++-- cmd/config/config.go | 1 + cmd/config/configInit.go | 2 +- cmd/config/configMigrate.go | 111 ++++++++ docgen/README.md | 2 + docs/usage/commands/k3d_cluster_create.md | 4 +- docs/usage/commands/k3d_config.md | 1 + .../{k3d_docgen.md => k3d_config_migrate.md} | 10 +- docs/usage/commands/k3d_node_create.md | 17 +- docs/usage/configfile.md | 15 +- docs/usage/guides/registries.md | 2 +- pkg/client/cluster.go | 2 +- pkg/config/config.go | 73 +++-- pkg/config/config_test.go | 27 +- pkg/config/jsonschema_test.go | 6 +- pkg/config/merge.go | 2 +- pkg/config/merge_test.go | 9 +- pkg/config/migrate.go | 40 +++ pkg/config/process.go | 2 +- pkg/config/process_test.go | 5 +- .../test_assets/config_test_cluster.yaml | 2 +- 
.../test_assets/config_test_cluster_list.yaml | 2 +- .../test_assets/config_test_simple.yaml | 13 +- .../test_assets/config_test_simple_2.yaml | 2 +- .../config_test_simple_invalid_servers.yaml | 9 +- .../test_assets/config_test_unknown.yaml | 2 +- pkg/config/transform.go | 22 +- pkg/config/transform_test.go | 5 +- pkg/config/types/types.go | 34 +++ pkg/config/v1alpha2/types.go | 99 ++++--- pkg/config/v1alpha3/migrations.go | 84 ++++++ pkg/config/v1alpha3/schema.json | 254 ++++++++++++++++++ pkg/config/v1alpha3/types.go | 203 ++++++++++++++ pkg/config/validate.go | 2 +- pkg/config/validate_test.go | 2 +- pkg/types/types.go | 2 - tests/assets/config_test_simple.yaml | 9 +- ...config_test_simple_migration_v1alpha2.yaml | 51 ++++ ...config_test_simple_migration_v1alpha3.yaml | 52 ++++ tests/test_config_file_migration.sh | 27 ++ 40 files changed, 1129 insertions(+), 164 deletions(-) create mode 100644 cmd/config/configMigrate.go rename docs/usage/commands/{k3d_docgen.md => k3d_config_migrate.md} (63%) create mode 100644 pkg/config/migrate.go create mode 100644 pkg/config/types/types.go create mode 100644 pkg/config/v1alpha3/migrations.go create mode 100644 pkg/config/v1alpha3/schema.json create mode 100644 pkg/config/v1alpha3/types.go create mode 100755 tests/assets/config_test_simple_migration_v1alpha2.yaml create mode 100755 tests/assets/config_test_simple_migration_v1alpha3.yaml create mode 100755 tests/test_config_file_migration.sh diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index f5fc4af5..574db83d 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -38,7 +38,7 @@ import ( cliutil "github.com/rancher/k3d/v4/cmd/util" k3dCluster "github.com/rancher/k3d/v4/pkg/client" "github.com/rancher/k3d/v4/pkg/config" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" 
"github.com/rancher/k3d/v4/version" @@ -77,11 +77,6 @@ func initConfig() { if _, err := os.Stat(configFile); err != nil { log.Fatalf("Failed to stat config file %s: %+v", configFile, err) } - log.Tracef("Schema: %+v", conf.JSONSchema) - - if err := config.ValidateSchemaFile(configFile, []byte(conf.JSONSchema)); err != nil { - log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) - } // try to read config into memory (viper map structure) if err := cfgViper.ReadInConfig(); err != nil { @@ -92,7 +87,16 @@ func initConfig() { log.Fatalf("Failed to read config file %s: %+v", configFile, err) } - log.Infof("Using config file %s", cfgViper.ConfigFileUsed()) + schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion")) + if err != nil { + log.Fatalf("Cannot validate config file %s: %+v", configFile, err) + } + + if err := config.ValidateSchemaFile(configFile, schema); err != nil { + log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) + } + + log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) } if log.GetLevel() >= log.DebugLevel { c, _ := yaml.Marshal(cfgViper.AllSettings()) @@ -121,19 +125,35 @@ func NewCmdClusterCreate() *cobra.Command { /************************* * Compute Configuration * *************************/ - cfg, err := config.FromViperSimple(cfgViper) + if cfgViper.GetString("apiversion") == "" { + cfgViper.Set("apiversion", config.DefaultConfigApiVersion) + } + if cfgViper.GetString("kind") == "" { + cfgViper.Set("kind", "Simple") + } + cfg, err := config.FromViper(cfgViper) if err != nil { log.Fatalln(err) } - log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", cfg) + if cfg.GetAPIVersion() != config.DefaultConfigApiVersion { + log.Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", 
config.DefaultConfigApiVersion, cfg.GetAPIVersion()) + cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion) + if err != nil { + log.Fatalln(err) + } + } - cfg, err = applyCLIOverrides(cfg) + simpleCfg := cfg.(conf.SimpleConfig) + + log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg) + + simpleCfg, err = applyCLIOverrides(simpleCfg) if err != nil { log.Fatalf("Failed to apply CLI overrides: %+v", err) } - log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", cfg) + log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg) /************************************** * Transform, Process & Validate Configuration * @@ -141,10 +161,10 @@ func NewCmdClusterCreate() *cobra.Command { // Set the name if len(args) != 0 { - cfg.Name = args[0] + simpleCfg.Name = args[0] } - clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, cfg) + clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, simpleCfg) if err != nil { log.Fatalln(err) } @@ -178,7 +198,7 @@ func NewCmdClusterCreate() *cobra.Command { if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil { // rollback if creation failed log.Errorln(err) - if cfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/ + if simpleCfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/ log.Fatalln("Cluster creation FAILED, rollback deactivated.") } // rollback if creation failed @@ -202,7 +222,7 @@ func NewCmdClusterCreate() *cobra.Command { if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig { log.Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name) - if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", 
&k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: cfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil { + if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: simpleCfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil { log.Warningln(err) } } @@ -266,6 +286,10 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().StringArrayP("label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -l \"other.label=somevalue@server[0]\"`") _ = ppViper.BindPFlag("cli.labels", cmd.Flags().Lookup("label")) + /* k3s */ + cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server[0]\"") + _ = cfgViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg")) + /****************** * "Normal" Flags * ****************** @@ -340,13 +364,6 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file") _ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config")) - /* k3s */ - cmd.Flags().StringArray("k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)") - _ = cfgViper.BindPFlag("options.k3s.extraserverargs", cmd.Flags().Lookup("k3s-server-arg")) - - cmd.Flags().StringArray("k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)") - _ = cfgViper.BindPFlag("options.k3s.extraagentargs", cmd.Flags().Lookup("k3s-agent-arg")) - /* Subcommands */ // done @@ -520,5 +537,30 @@ func applyCLIOverrides(cfg 
conf.SimpleConfig) (conf.SimpleConfig, error) { log.Tracef("EnvFilterMap: %+v", envFilterMap) + // --k3s-arg + argFilterMap := make(map[string][]string, 1) + for _, argFlag := range ppViper.GetStringSlice("cli.k3sargs") { + + // split node filter from the specified arg + arg, filters, err := cliutil.SplitFiltersFromFlag(argFlag) + if err != nil { + log.Fatalln(err) + } + + // create new entry or append filter to existing entry + if _, exists := argFilterMap[arg]; exists { + argFilterMap[arg] = append(argFilterMap[arg], filters...) + } else { + argFilterMap[arg] = filters + } + } + + for arg, nodeFilters := range argFilterMap { + cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, conf.K3sArgWithNodeFilters{ + Arg: arg, + NodeFilters: nodeFilters, + }) + } + return cfg, nil } diff --git a/cmd/config/config.go b/cmd/config/config.go index 2a49986a..7156c576 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -41,6 +41,7 @@ func NewCmdConfig() *cobra.Command { } cmd.AddCommand(NewCmdConfigInit()) + cmd.AddCommand(NewCmdConfigMigrate()) return cmd } diff --git a/cmd/config/configInit.go b/cmd/config/configInit.go index 34b27b91..b85b754b 100644 --- a/cmd/config/configInit.go +++ b/cmd/config/configInit.go @@ -25,7 +25,7 @@ import ( "fmt" "os" - config "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + config "github.com/rancher/k3d/v4/pkg/config/v1alpha3" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) diff --git a/cmd/config/configMigrate.go b/cmd/config/configMigrate.go new file mode 100644 index 00000000..e18b19bc --- /dev/null +++ b/cmd/config/configMigrate.go @@ -0,0 +1,111 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package config + +import ( + "os" + "strings" + + "github.com/rancher/k3d/v4/pkg/config" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "gopkg.in/yaml.v2" +) + +// NewCmdConfigMigrate returns a new cobra command +func NewCmdConfigMigrate() *cobra.Command { + + cmd := &cobra.Command{ + Use: "migrate INPUT OUTPUT", + Args: cobra.RangeArgs(1, 2), + Run: func(cmd *cobra.Command, args []string) { + + configFile := args[0] + + if _, err := os.Stat(configFile); err != nil { + log.Fatalf("Failed to stat config file %s: %+v", configFile, err) + } + + cfgViper := viper.New() + cfgViper.SetConfigType("yaml") + + cfgViper.SetConfigFile(configFile) + + // try to read config into memory (viper map structure) + if err := cfgViper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + log.Fatalf("Config file %s not found: %+v", configFile, err) + } + // config file found but some other error happened + log.Fatalf("Failed to read config file %s: %+v", configFile, err) + } + + schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion")) + if err != nil { + log.Fatalf("Cannot validate config file %s: %+v", configFile, err) + } + + if err := 
config.ValidateSchemaFile(configFile, schema); err != nil { + log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) + } + + log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) + + cfg, err := config.FromViper(cfgViper) + if err != nil { + log.Fatalln(err) + } + + if cfg.GetAPIVersion() != config.DefaultConfigApiVersion { + cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion) + if err != nil { + log.Fatalln(err) + } + } + + yamlout, err := yaml.Marshal(cfg) + if err != nil { + log.Fatalln(err) + } + + output := "-" + + if len(args) > 1 { + output = args[1] + } + + if output == "-" { + if _, err := os.Stdout.Write(yamlout); err != nil { + log.Fatalln(err) + } + } else { + if err := os.WriteFile(output, yamlout, os.ModeAppend); err != nil { + log.Fatalln(err) + } + } + + }, + } + + return cmd +} diff --git a/docgen/README.md b/docgen/README.md index b65264ef..26f1be62 100644 --- a/docgen/README.md +++ b/docgen/README.md @@ -6,6 +6,8 @@ The code will output files in [`../docs/usage/commands/`](../docs/usage/commands ## Run +- may required a `replace github.com/rancher/k3d/v4 => PATH/TO/LOCAL/REPO` in the `go.mod` + ```bash # ensure that you're in the docgen dir, as the relative path to the docs/ dir is hardcoded cd docgen diff --git a/docs/usage/commands/k3d_cluster_create.md b/docs/usage/commands/k3d_cluster_create.md index 7ef87cfb..f784462d 100644 --- a/docs/usage/commands/k3d_cluster_create.md +++ b/docs/usage/commands/k3d_cluster_create.md @@ -29,8 +29,8 @@ k3d cluster create NAME [flags] --gpus string GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker] -h, --help help for create -i, --image string Specify k3s image that you want to use for the nodes - --k3s-agent-arg k3s agent Additional args passed to the k3s agent command on agent nodes (new flag per arg) - --k3s-server-arg k3s 
server Additional args passed to the k3s server command on server nodes (new flag per arg) + --k3s-arg ARG@NODEFILTER[;@NODEFILTER] Additional args passed to k3s command (Format: ARG@NODEFILTER[;@NODEFILTER]) + - Example: `k3d cluster create --k3s-arg "--disable=traefik@server[0]" --kubeconfig-switch-context Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true) --kubeconfig-update-default Directly update the default kubeconfig with the new cluster's context (default true) -l, --label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to node container (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] diff --git a/docs/usage/commands/k3d_config.md b/docs/usage/commands/k3d_config.md index c42923e5..8f8f076e 100644 --- a/docs/usage/commands/k3d_config.md +++ b/docs/usage/commands/k3d_config.md @@ -28,4 +28,5 @@ k3d config [flags] * [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker! * [k3d config init](k3d_config_init.md) - +* [k3d config migrate](k3d_config_migrate.md) - diff --git a/docs/usage/commands/k3d_docgen.md b/docs/usage/commands/k3d_config_migrate.md similarity index 63% rename from docs/usage/commands/k3d_docgen.md rename to docs/usage/commands/k3d_config_migrate.md index 4767bdb6..902687ad 100644 --- a/docs/usage/commands/k3d_docgen.md +++ b/docs/usage/commands/k3d_config_migrate.md @@ -1,15 +1,15 @@ -## k3d docgen +## k3d config migrate + -Generate command docs ``` -k3d docgen [flags] +k3d config migrate INPUT OUTPUT [flags] ``` ### Options ``` - -h, --help help for docgen + -h, --help help for migrate ``` ### Options inherited from parent commands @@ -22,5 +22,5 @@ k3d docgen [flags] ### SEE ALSO -* [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker! 
+* [k3d config](k3d_config.md) - Work with config file(s) diff --git a/docs/usage/commands/k3d_node_create.md b/docs/usage/commands/k3d_node_create.md index 041bb5cb..808cbe13 100644 --- a/docs/usage/commands/k3d_node_create.md +++ b/docs/usage/commands/k3d_node_create.md @@ -13,14 +13,15 @@ k3d node create NAME [flags] ### Options ``` - -c, --cluster string Select the cluster that the node shall connect to. (default "k3s-default") - -h, --help help for create - -i, --image string Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.20.0-k3s2") - --memory string Memory limit imposed on the node [From docker] - --replicas int Number of replicas of this node specification. (default 1) - --role string Specify node role [server, agent] (default "agent") - --timeout duration Maximum waiting time for '--wait' before canceling/returning. - --wait Wait for the node(s) to be ready before returning. + -c, --cluster string Select the cluster that the node shall connect to. (default "k3s-default") + -h, --help help for create + -i, --image string Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.20.0-k3s2") + --k3s-node-label strings Specify k3s node labels in format "foo=bar" + --memory string Memory limit imposed on the node [From docker] + --replicas int Number of replicas of this node specification. (default 1) + --role string Specify node role [server, agent] (default "agent") + --timeout duration Maximum waiting time for '--wait' before canceling/returning. + --wait Wait for the node(s) to be ready before returning. 
``` ### Options inherited from parent commands diff --git a/docs/usage/configfile.md b/docs/usage/configfile.md index 051b4edb..97fa45c0 100644 --- a/docs/usage/configfile.md +++ b/docs/usage/configfile.md @@ -25,13 +25,13 @@ Using a config file is as easy as putting it in a well-known place in your file As of the time of writing this documentation, the config file only **requires** you to define two fields: -- `apiVersion` to match the version of the config file that you want to use (at this time it would be `apiVersion: k3d.io/v1alpha2`) +- `apiVersion` to match the version of the config file that you want to use (at this time it would be `apiVersion: k3d.io/v1alpha3`) - `kind` to define the kind of config file that you want to use (currently we only have the `Simple` config) So this would be the minimal config file, which configures absolutely nothing: ```yaml -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple ``` @@ -43,7 +43,7 @@ Currently, the config file is still in an Alpha-State, meaning, that it is subje !!! info "Validation via JSON-Schema" k3d uses a [JSON-Schema](https://json-schema.org/) to describe the expected format and fields of the configuration file. This schema is also used to [validate](https://github.com/xeipuuv/gojsonschema#validation) a user-given config file. - This JSON-Schema can be found in the specific config version sub-directory in the repository (e.g. [here for `v1alpha2`](https://github.com/rancher/k3d/blob/main/pkg/config/v1alpha2/schema.json)) and could be used to lookup supported fields or by linters to validate the config file, e.g. in your code editor. + This JSON-Schema can be found in the specific config version sub-directory in the repository (e.g. [here for `v1alpha3`](https://github.com/rancher/k3d/blob/main/pkg/config/v1alpha3/schema.json)) and could be used to lookup supported fields or by linters to validate the config file, e.g. in your code editor. 
### All Options: Example @@ -51,7 +51,7 @@ Since the config options and the config file are changing quite a bit, it's hard ```yaml # k3d configuration file, saved as e.g. /home/me/myk3dcluster.yaml -apiVersion: k3d.io/v1alpha2 # this will change in the future as we make everything more stable +apiVersion: k3d.io/v1alpha3 # this will change in the future as we make everything more stable kind: Simple # internally, we also have a Cluster config, which is not yet available externally name: mycluster # name that you want to give to your cluster (will still be prefixed with `k3d-`) servers: 1 # same as `--servers 1` @@ -98,9 +98,10 @@ options: disableRollback: false # same as `--no-Rollback` disableHostIPInjection: false # same as `--no-hostip` k3s: # options passed on to K3s itself - extraServerArgs: # additional arguments passed to the `k3s server` command; same as `--k3s-server-arg` - - --tls-san=my.host.domain - extraAgentArgs: [] # addditional arguments passed to the `k3s agent` command; same as `--k3s-agent-arg` + extraArgs: # additional arguments passed to the `k3s server|agent` command; same as `--k3s-arg` + - arg: --tls-san=my.host.domain + nodeFilters: + - server[*] kubeconfig: updateDefaultKubeconfig: true # add new cluster to your default Kubeconfig; same as `--kubeconfig-update-default` (default: true) switchCurrentContext: true # also set current-context to the new cluster's context; same as `--kubeconfig-switch-context` (default: true) diff --git a/docs/usage/guides/registries.md b/docs/usage/guides/registries.md index 04411dc8..11a485ab 100644 --- a/docs/usage/guides/registries.md +++ b/docs/usage/guides/registries.md @@ -29,7 +29,7 @@ This file can also be used for providing additional information necessary for ac If you're using a `SimpleConfig` file to configure your k3d cluster, you may as well embed the registries.yaml in there directly: ```yaml -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: test servers: 1 diff --git 
a/pkg/client/cluster.go b/pkg/client/cluster.go index ec6cf801..4aa29fcb 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -36,7 +36,7 @@ import ( "github.com/docker/go-connections/nat" "github.com/imdario/mergo" "github.com/rancher/k3d/v4/pkg/actions" - config "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + config "github.com/rancher/k3d/v4/pkg/config/v1alpha3" k3drt "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/runtimes/docker" runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors" diff --git a/pkg/config/config.go b/pkg/config/config.go index 9cf51e23..efd5b267 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -29,43 +29,51 @@ import ( "github.com/spf13/viper" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + "github.com/rancher/k3d/v4/pkg/config/v1alpha3" + defaultConfig "github.com/rancher/k3d/v4/pkg/config/v1alpha3" + + types "github.com/rancher/k3d/v4/pkg/config/types" ) -func FromViperSimple(config *viper.Viper) (conf.SimpleConfig, error) { +const DefaultConfigApiVersion = defaultConfig.ApiVersion - var cfg conf.SimpleConfig - - // determine config kind - if config.GetString("kind") != "" && strings.ToLower(config.GetString("kind")) != "simple" { - return cfg, fmt.Errorf("Wrong `kind` '%s' != 'simple' in config file", config.GetString("kind")) - } - - if err := config.Unmarshal(&cfg); err != nil { - log.Errorln("Failed to unmarshal File config") - - return cfg, err - } - - return cfg, nil +var Schemas = map[string]string{ + v1alpha2.ApiVersion: v1alpha2.JSONSchema, + v1alpha3.ApiVersion: v1alpha3.JSONSchema, } -func FromViper(config *viper.Viper) (conf.Config, error) { +func GetSchemaByVersion(apiVersion string) ([]byte, error) { + schema, ok := Schemas[strings.ToLower(apiVersion)] + if !ok { + return nil, fmt.Errorf("unsupported apiVersion '%s'", apiVersion) + } + return []byte(schema), nil +} - var cfg conf.Config +func FromViper(config 
*viper.Viper) (types.Config, error) { - // determine config kind - switch strings.ToLower(config.GetString("kind")) { - case "simple": - cfg = conf.SimpleConfig{} - case "cluster": - cfg = conf.ClusterConfig{} - case "clusterlist": - cfg = conf.ClusterListConfig{} + var cfg types.Config + var err error + + apiVersion := strings.ToLower(config.GetString("apiversion")) + kind := strings.ToLower(config.GetString("kind")) + + log.Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind) + + switch apiVersion { + case "k3d.io/v1alpha2": + cfg, err = v1alpha2.GetConfigByKind(kind) + case "k3d.io/v1alpha3": + cfg, err = v1alpha3.GetConfigByKind(kind) case "": - return nil, fmt.Errorf("Missing `kind` in config file") + cfg, err = defaultConfig.GetConfigByKind(kind) default: - return nil, fmt.Errorf("Unknown `kind` '%s' in config file", config.GetString("kind")) + return nil, fmt.Errorf("cannot read config with apiversion '%s'", config.GetString("apiversion")) + } + + if err != nil { + return nil, err } if err := config.Unmarshal(&cfg); err != nil { @@ -76,3 +84,12 @@ func FromViper(config *viper.Viper) (conf.Config, error) { return cfg, nil } + +func getMigrations(version string) map[string]func(types.Config) (types.Config, error) { + switch version { + case v1alpha3.ApiVersion: + return v1alpha3.Migrations + default: + return nil + } +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 9f6e05d7..eb15f152 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -26,7 +26,8 @@ import ( "time" "github.com/go-test/deep" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + configtypes "github.com/rancher/k3d/v4/pkg/config/types" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/spf13/viper" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -39,8 +40,8 @@ func TestReadSimpleConfig(t *testing.T) { exposedAPI.HostPort = "6443" expectedConfig := conf.SimpleConfig{ - TypeMeta: conf.TypeMeta{ - APIVersion: 
"k3d.io/v1alpha2", + TypeMeta: configtypes.TypeMeta{ + APIVersion: "k3d.io/v1alpha3", Kind: "Simple", }, Name: "test", @@ -83,8 +84,12 @@ func TestReadSimpleConfig(t *testing.T) { DisableImageVolume: false, }, K3sOptions: conf.SimpleConfigOptionsK3s{ - ExtraServerArgs: []string{"--tls-san=127.0.0.1"}, - ExtraAgentArgs: []string{}, + ExtraArgs: []conf.K3sArgWithNodeFilters{ + { + Arg: "--tls-san=127.0.0.1", + NodeFilters: []string{"server[*]"}, + }, + }, }, KubeconfigOptions: conf.SimpleConfigOptionsKubeconfig{ UpdateDefaultKubeconfig: true, @@ -107,7 +112,7 @@ func TestReadSimpleConfig(t *testing.T) { t.Error(err) } - cfg, err := FromViperSimple(config) + cfg, err := FromViper(config) if err != nil { t.Error(err) } @@ -123,8 +128,8 @@ func TestReadSimpleConfig(t *testing.T) { func TestReadClusterConfig(t *testing.T) { expectedConfig := conf.ClusterConfig{ - TypeMeta: conf.TypeMeta{ - APIVersion: "k3d.io/v1alpha2", + TypeMeta: configtypes.TypeMeta{ + APIVersion: "k3d.io/v1alpha3", Kind: "Cluster", }, Cluster: k3d.Cluster{ @@ -168,8 +173,8 @@ func TestReadClusterConfig(t *testing.T) { func TestReadClusterListConfig(t *testing.T) { expectedConfig := conf.ClusterListConfig{ - TypeMeta: conf.TypeMeta{ - APIVersion: "k3d.io/v1alpha2", + TypeMeta: configtypes.TypeMeta{ + APIVersion: "k3d.io/v1alpha3", Kind: "ClusterList", }, Clusters: []k3d.Cluster{ @@ -237,7 +242,7 @@ func TestReadUnknownConfig(t *testing.T) { t.Error(err) } - _, err := FromViperSimple(config) + _, err := FromViper(config) if err == nil { t.Fail() } diff --git a/pkg/config/jsonschema_test.go b/pkg/config/jsonschema_test.go index 5ece4e79..f3ec75b6 100644 --- a/pkg/config/jsonschema_test.go +++ b/pkg/config/jsonschema_test.go @@ -24,14 +24,14 @@ package config import ( "testing" - "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + "github.com/rancher/k3d/v4/pkg/config/v1alpha3" ) func TestValidateSchema(t *testing.T) { cfgPath := "./test_assets/config_test_simple.yaml" - if err := 
ValidateSchemaFile(cfgPath, []byte(v1alpha2.JSONSchema)); err != nil { + if err := ValidateSchemaFile(cfgPath, []byte(v1alpha3.JSONSchema)); err != nil { t.Errorf("Validation of config file %s against the default schema failed: %+v", cfgPath, err) } @@ -42,7 +42,7 @@ func TestValidateSchemaFail(t *testing.T) { cfgPath := "./test_assets/config_test_simple_invalid_servers.yaml" var err error - if err = ValidateSchemaFile(cfgPath, []byte(v1alpha2.JSONSchema)); err == nil { + if err = ValidateSchemaFile(cfgPath, []byte(v1alpha3.JSONSchema)); err == nil { t.Errorf("Validation of config file %s against the default schema passed where we expected a failure", cfgPath) } diff --git a/pkg/config/merge.go b/pkg/config/merge.go index 390269dc..c86c0eaa 100644 --- a/pkg/config/merge.go +++ b/pkg/config/merge.go @@ -24,7 +24,7 @@ package config import ( "github.com/imdario/mergo" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" log "github.com/sirupsen/logrus" ) diff --git a/pkg/config/merge_test.go b/pkg/config/merge_test.go index 349f7d4b..cfb73b72 100644 --- a/pkg/config/merge_test.go +++ b/pkg/config/merge_test.go @@ -25,7 +25,8 @@ package config import ( "testing" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + configtypes "github.com/rancher/k3d/v4/pkg/config/types" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/spf13/viper" "gotest.tools/assert" ) @@ -34,7 +35,7 @@ func TestMergeSimpleConfig(t *testing.T) { srcConfig := "./test_assets/config_test_simple.yaml" destConfig := "./test_assets/config_test_simple_2.yaml" - var src, dest conf.Config + var src, dest configtypes.Config var err error cfg1 := viper.New() @@ -45,11 +46,11 @@ func TestMergeSimpleConfig(t *testing.T) { cfg2.SetConfigFile(destConfig) _ = cfg2.ReadInConfig() - if src, err = FromViperSimple(cfg1); err != nil { + if src, err = FromViper(cfg1); err != nil { t.Fatal(err) } - if dest, err = FromViperSimple(cfg2); err != 
nil { + if dest, err = FromViper(cfg2); err != nil { t.Fatal(err) } diff --git a/pkg/config/migrate.go b/pkg/config/migrate.go new file mode 100644 index 00000000..28fb5d1f --- /dev/null +++ b/pkg/config/migrate.go @@ -0,0 +1,40 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +package config + +import ( + "fmt" + + types "github.com/rancher/k3d/v4/pkg/config/types" +) + +func Migrate(config types.Config, targetVersion string) (types.Config, error) { + + migration, ok := getMigrations(targetVersion)[config.GetAPIVersion()] + if !ok { + return nil, fmt.Errorf("no migration possible from '%s' to '%s'", config.GetAPIVersion(), targetVersion) + } + + return migration(config) + +} diff --git a/pkg/config/process.go b/pkg/config/process.go index 199a77cf..d67d55ae 100644 --- a/pkg/config/process.go +++ b/pkg/config/process.go @@ -23,7 +23,7 @@ THE SOFTWARE. 
package config import ( - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" log "github.com/sirupsen/logrus" ) diff --git a/pkg/config/process_test.go b/pkg/config/process_test.go index 5326ca53..c4d890ed 100644 --- a/pkg/config/process_test.go +++ b/pkg/config/process_test.go @@ -26,6 +26,7 @@ import ( "context" "testing" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/spf13/viper" "gotest.tools/assert" @@ -38,14 +39,14 @@ func TestProcessClusterConfig(t *testing.T) { vip.SetConfigFile(cfgFile) _ = vip.ReadInConfig() - cfg, err := FromViperSimple(vip) + cfg, err := FromViper(vip) if err != nil { t.Error(err) } t.Logf("\n========== Read Config and transform to cluster ==========\n%+v\n=================================\n", cfg) - clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg) + clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg.(conf.SimpleConfig)) if err != nil { t.Error(err) } diff --git a/pkg/config/test_assets/config_test_cluster.yaml b/pkg/config/test_assets/config_test_cluster.yaml index e90f30b2..f1a8438c 100644 --- a/pkg/config/test_assets/config_test_cluster.yaml +++ b/pkg/config/test_assets/config_test_cluster.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Cluster name: foo nodes: diff --git a/pkg/config/test_assets/config_test_cluster_list.yaml b/pkg/config/test_assets/config_test_cluster_list.yaml index 0eba2c22..9d2e55ca 100644 --- a/pkg/config/test_assets/config_test_cluster_list.yaml +++ b/pkg/config/test_assets/config_test_cluster_list.yaml @@ -1,5 +1,5 @@ --- -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: ClusterList clusters: - name: foo diff --git a/pkg/config/test_assets/config_test_simple.yaml b/pkg/config/test_assets/config_test_simple.yaml index e264bd9a..4e132176 100644 --- 
a/pkg/config/test_assets/config_test_simple.yaml +++ b/pkg/config/test_assets/config_test_simple.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: test servers: 1 @@ -25,7 +25,7 @@ env: labels: - label: foo=bar nodeFilters: - - server[0] + - "server[0]" - loadbalancer options: @@ -35,9 +35,10 @@ options: disableLoadbalancer: false disableImageVolume: false k3s: - extraServerArgs: - - --tls-san=127.0.0.1 - extraAgentArgs: [] + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - "server[*]" kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + switchCurrentContext: true diff --git a/pkg/config/test_assets/config_test_simple_2.yaml b/pkg/config/test_assets/config_test_simple_2.yaml index 0d5293ee..a849e322 100644 --- a/pkg/config/test_assets/config_test_simple_2.yaml +++ b/pkg/config/test_assets/config_test_simple_2.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: supertest agents: 8 \ No newline at end of file diff --git a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml index 7f2442c3..b9e75fb6 100644 --- a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml +++ b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: 1234 servers: 1 @@ -35,9 +35,10 @@ options: disableLoadbalancer: false disableImageVolume: false k3s: - extraServerArgs: - - --tls-san=127.0.0.1 - extraAgentArgs: [] + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - "server[*]" kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true \ No newline at end of file diff --git a/pkg/config/test_assets/config_test_unknown.yaml b/pkg/config/test_assets/config_test_unknown.yaml index 356972ce..66fe0c0f 100644 --- 
a/pkg/config/test_assets/config_test_unknown.yaml +++ b/pkg/config/test_assets/config_test_unknown.yaml @@ -1,3 +1,3 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Unknown foo: bar \ No newline at end of file diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 76f9751e..0ec43686 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -31,7 +31,7 @@ import ( "github.com/docker/go-connections/nat" cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" "github.com/rancher/k3d/v4/pkg/types/k3s" @@ -117,7 +117,6 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim serverNode := k3d.Node{ Role: k3d.ServerRole, Image: simpleConfig.Image, - Args: simpleConfig.Options.K3sOptions.ExtraServerArgs, ServerOpts: k3d.ServerOpts{}, Memory: simpleConfig.Options.Runtime.ServersMemory, } @@ -135,7 +134,6 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim agentNode := k3d.Node{ Role: k3d.AgentRole, Image: simpleConfig.Image, - Args: simpleConfig.Options.K3sOptions.ExtraAgentArgs, Memory: simpleConfig.Options.Runtime.AgentsMemory, } newCluster.Nodes = append(newCluster.Nodes, &agentNode) @@ -228,6 +226,22 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } } + // -> ARGS + for _, argWithNodeFilters := range simpleConfig.Options.K3sOptions.ExtraArgs { + if len(argWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + return nil, fmt.Errorf("K3sExtraArg '%s' lacks a node filter, but there's more than one node", argWithNodeFilters.Arg) + } + + nodes, err := util.FilterNodes(nodeList, argWithNodeFilters.NodeFilters) + if err != nil { + return nil, err + } + + for _, node := range nodes { + node.Args = append(node.Args, 
argWithNodeFilters.Arg) + } + } + /************************** * Cluster Create Options * **************************/ @@ -238,8 +252,6 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim WaitForServer: simpleConfig.Options.K3dOptions.Wait, Timeout: simpleConfig.Options.K3dOptions.Timeout, DisableLoadBalancer: simpleConfig.Options.K3dOptions.DisableLoadbalancer, - K3sServerArgs: simpleConfig.Options.K3sOptions.ExtraServerArgs, - K3sAgentArgs: simpleConfig.Options.K3sOptions.ExtraAgentArgs, GPURequest: simpleConfig.Options.Runtime.GPURequest, ServersMemory: simpleConfig.Options.Runtime.ServersMemory, AgentsMemory: simpleConfig.Options.Runtime.AgentsMemory, diff --git a/pkg/config/transform_test.go b/pkg/config/transform_test.go index 6cfb336d..495f499c 100644 --- a/pkg/config/transform_test.go +++ b/pkg/config/transform_test.go @@ -26,6 +26,7 @@ import ( "context" "testing" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/spf13/viper" ) @@ -37,14 +38,14 @@ func TestTransformSimpleConfigToClusterConfig(t *testing.T) { vip.SetConfigFile(cfgFile) _ = vip.ReadInConfig() - cfg, err := FromViperSimple(vip) + cfg, err := FromViper(vip) if err != nil { t.Error(err) } t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", cfg) - clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg) + clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg.(conf.SimpleConfig)) if err != nil { t.Error(err) } diff --git a/pkg/config/types/types.go b/pkg/config/types/types.go new file mode 100644 index 00000000..ff0e26e5 --- /dev/null +++ b/pkg/config/types/types.go @@ -0,0 +1,34 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software 
without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package types + +// TypeMeta is basically copied from https://github.com/kubernetes/apimachinery/blob/a3b564b22db316a41e94fdcffcf9995424fe924c/pkg/apis/meta/v1/types.go#L36-L56 +type TypeMeta struct { + Kind string `mapstructure:"kind,omitempty" yaml:"kind,omitempty" json:"kind,omitempty"` + APIVersion string `mapstructure:"apiVersion,omitempty" yaml:"apiVersion,omitempty" json:"apiVersion,omitempty"` +} + +// Config interface. 
+type Config interface { + GetKind() string + GetAPIVersion() string +} diff --git a/pkg/config/v1alpha2/types.go b/pkg/config/v1alpha2/types.go index 0679cfd4..9f2375d1 100644 --- a/pkg/config/v1alpha2/types.go +++ b/pkg/config/v1alpha2/types.go @@ -27,6 +27,7 @@ import ( "fmt" "time" + configtypes "github.com/rancher/k3d/v4/pkg/config/types" k3d "github.com/rancher/k3d/v4/pkg/types" "github.com/rancher/k3d/v4/version" ) @@ -35,9 +36,11 @@ import ( //go:embed schema.json var JSONSchema string +const ApiVersion = "k3d.io/v1alpha2" + // DefaultConfigTpl for printing const DefaultConfigTpl = `--- -apiVersion: k3d.io/v1alpha2 +apiVersion: %s kind: Simple name: %s servers: 1 @@ -48,21 +51,11 @@ image: %s // DefaultConfig templated DefaultConfigTpl var DefaultConfig = fmt.Sprintf( DefaultConfigTpl, + ApiVersion, k3d.DefaultClusterName, fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), ) -// TypeMeta is basically copied from https://github.com/kubernetes/apimachinery/blob/a3b564b22db316a41e94fdcffcf9995424fe924c/pkg/apis/meta/v1/types.go#L36-L56 -type TypeMeta struct { - Kind string `mapstructure:"kind,omitempty" yaml:"kind,omitempty" json:"kind,omitempty"` - APIVersion string `mapstructure:"apiVersion,omitempty" yaml:"apiVersion,omitempty" json:"apiVersion,omitempty"` -} - -// Config interface. -type Config interface { - GetKind() string -} - type VolumeWithNodeFilters struct { Volume string `mapstructure:"volume" yaml:"volume" json:"volume,omitempty"` NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` @@ -119,21 +112,21 @@ type SimpleConfigOptionsK3s struct { // SimpleConfig describes the toplevel k3d configuration file. 
type SimpleConfig struct { - TypeMeta `mapstructure:",squash" yaml:",inline"` - Name string `mapstructure:"name" yaml:"name" json:"name,omitempty"` - Servers int `mapstructure:"servers" yaml:"servers" json:"servers,omitempty"` //nolint:lll // default 1 - Agents int `mapstructure:"agents" yaml:"agents" json:"agents,omitempty"` //nolint:lll // default 0 - ExposeAPI SimpleExposureOpts `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"` - Image string `mapstructure:"image" yaml:"image" json:"image,omitempty"` - Network string `mapstructure:"network" yaml:"network" json:"network,omitempty"` - Subnet string `mapstructure:"subnet" yaml:"subnet" json:"subnet,omitempty"` - ClusterToken string `mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated - Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` - Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` - Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` - Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` - Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` - Registries struct { + configtypes.TypeMeta `mapstructure:",squash" yaml:",inline"` + Name string `mapstructure:"name" yaml:"name" json:"name,omitempty"` + Servers int `mapstructure:"servers" yaml:"servers" json:"servers,omitempty"` //nolint:lll // default 1 + Agents int `mapstructure:"agents" yaml:"agents" json:"agents,omitempty"` //nolint:lll // default 0 + ExposeAPI SimpleExposureOpts `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"` + Image string `mapstructure:"image" yaml:"image" json:"image,omitempty"` + Network string `mapstructure:"network" yaml:"network" json:"network,omitempty"` + Subnet string `mapstructure:"subnet" yaml:"subnet" json:"subnet,omitempty"` + ClusterToken string 
`mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated + Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` + Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` + Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` + Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` + Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` + Registries struct { Use []string `mapstructure:"use" yaml:"use,omitempty" json:"use,omitempty"` Create bool `mapstructure:"create" yaml:"create,omitempty" json:"create,omitempty"` Config string `mapstructure:"config" yaml:"config,omitempty" json:"config,omitempty"` // registries.yaml (k3s config for containerd registry override) @@ -147,30 +140,60 @@ type SimpleExposureOpts struct { HostPort string `mapstructure:"hostPort" yaml:"hostPort,omitempty" json:"hostPort,omitempty"` } -// GetKind implements Config.GetKind +// Kind implements Config.Kind func (c SimpleConfig) GetKind() string { - return "Cluster" + return "Simple" +} + +func (c SimpleConfig) GetAPIVersion() string { + return ApiVersion } // ClusterConfig describes a single cluster config type ClusterConfig struct { - TypeMeta `mapstructure:",squash" yaml:",inline"` - Cluster k3d.Cluster `mapstructure:",squash" yaml:",inline"` - ClusterCreateOpts k3d.ClusterCreateOpts `mapstructure:"options" yaml:"options"` - KubeconfigOpts SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` + configtypes.TypeMeta `mapstructure:",squash" yaml:",inline"` + Cluster k3d.Cluster `mapstructure:",squash" yaml:",inline"` + ClusterCreateOpts k3d.ClusterCreateOpts `mapstructure:"options" yaml:"options"` + KubeconfigOpts SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` } -// GetKind implements Config.GetKind +// Kind implements 
Config.Kind func (c ClusterConfig) GetKind() string { - return "Cluster" + return "Simple" +} + +func (c ClusterConfig) GetAPIVersion() string { + return ApiVersion } // ClusterListConfig describes a list of clusters type ClusterListConfig struct { - TypeMeta `mapstructure:",squash" yaml:",inline"` - Clusters []k3d.Cluster `mapstructure:"clusters" yaml:"clusters"` + configtypes.TypeMeta `mapstructure:",squash" yaml:",inline"` + Clusters []k3d.Cluster `mapstructure:"clusters" yaml:"clusters"` } func (c ClusterListConfig) GetKind() string { - return "ClusterList" + return "Simple" +} + +func (c ClusterListConfig) GetAPIVersion() string { + return ApiVersion +} + +func GetConfigByKind(kind string) (configtypes.Config, error) { + + // determine config kind + switch kind { + case "simple": + return SimpleConfig{}, nil + case "cluster": + return ClusterConfig{}, nil + case "clusterlist": + return ClusterListConfig{}, nil + case "": + return nil, fmt.Errorf("missing `kind` in config file") + default: + return nil, fmt.Errorf("unknown `kind` '%s' in config file", kind) + } + } diff --git a/pkg/config/v1alpha3/migrations.go b/pkg/config/v1alpha3/migrations.go new file mode 100644 index 00000000..5c894ca1 --- /dev/null +++ b/pkg/config/v1alpha3/migrations.go @@ -0,0 +1,84 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +package v1alpha3 + +import ( + "encoding/json" + + configtypes "github.com/rancher/k3d/v4/pkg/config/types" + "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + log "github.com/sirupsen/logrus" +) + +var Migrations = map[string]func(configtypes.Config) (configtypes.Config, error){ + v1alpha2.ApiVersion: MigrateV1Alpha2, +} + +func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { + log.Debugln("Migrating v1alpha2 to v1alpha3") + + injson, err := json.Marshal(input) + if err != nil { + return nil, err + } + + if input.GetKind() == "Simple" { + cfg := SimpleConfig{} + + if err := json.Unmarshal(injson, &cfg); err != nil { + return nil, err + } + + cfg.Options.K3sOptions.ExtraArgs = []K3sArgWithNodeFilters{} + + for _, arg := range input.(v1alpha2.SimpleConfig).Options.K3sOptions.ExtraServerArgs { + cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ + Arg: arg, + NodeFilters: []string{ + "server[*]", + }, + }) + } + + for _, arg := range input.(v1alpha2.SimpleConfig).Options.K3sOptions.ExtraAgentArgs { + cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ + Arg: arg, + NodeFilters: []string{ + "agent[*]", + }, + }) + } + + cfg.APIVersion = ApiVersion + + log.Debugf("Migrated config: %+v", cfg) + + return cfg, nil + + } + + log.Debugf("No migration needed for %s#%s -> %s#%s", input.GetAPIVersion(), input.GetKind(), ApiVersion, input.GetKind()) + + return input, nil + +} diff --git 
a/pkg/config/v1alpha3/schema.json b/pkg/config/v1alpha3/schema.json new file mode 100644 index 00000000..2b07c02c --- /dev/null +++ b/pkg/config/v1alpha3/schema.json @@ -0,0 +1,254 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "SimpleConfig", + "type": "object", + "required": [ + "apiVersion", + "kind" + ], + "properties": { + "apiVersion": { + "type": "string", + "enum": [ + "k3d.io/v1alpha3" + ], + "default": "k3d.io/v1alpha3" + }, + "kind": { + "type": "string", + "enum": [ + "Simple" + ], + "default": "Simple" + }, + "name": { + "description": "Name of the cluster (must be a valid hostname and will be prefixed with 'k3d-'). Example: 'mycluster'.", + "type": "string", + "format": "hostname" + }, + "servers": { + "type": "number", + "minimum": 1 + }, + "agents": { + "type": "number", + "minimum": 0 + }, + "kubeAPI": { + "type": "object", + "properties": { + "host": { + "type": "string", + "format": "hostname" + }, + "hostIP": { + "type": "string", + "format": "ipv4", + "examples": [ + "0.0.0.0", + "192.168.178.55" + ] + }, + "hostPort": { + "type":"string", + "examples": [ + "6443" + ] + } + }, + "additionalProperties": false + }, + "image": { + "type": "string", + "examples": [ + "rancher/k3s:latest" + ] + }, + "network": { + "type": "string" + }, + "subnet": { + "type": "string", + "default": "auto", + "examples": [ + "172.28.0.0/16", + "192.162.0.0/16" + ] + }, + "token": { + "type": "string" + }, + "volumes": { + "type": "array", + "items": { + "type": "object", + "properties": { + "volume": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "ports": { + "type": "array", + "items": { + "type": "object", + "properties": { + "port": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "labels": { + "type": "array", + "items": { + "type": "object", + "properties": { + 
"label": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "options": { + "type": "object", + "properties": { + "k3d": { + "type": "object", + "properties": { + "wait": { + "type": "boolean", + "default": true + }, + "timeout": { + "type": "string", + "examples": [ + "60s", + "1m", + "1m30s" + ] + }, + "disableLoadbalancer": { + "type": "boolean", + "default": false + }, + "disableImageVolume": { + "type": "boolean", + "default": false + }, + "disableRollback": { + "type": "boolean", + "default": false + }, + "disableHostIPInjection": { + "type": "boolean", + "default": false + } + }, + "additionalProperties": false + }, + "k3s": { + "type": "object", + "properties": { + "extraArgs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "arg": { + "type": "string", + "examples": [ + "--tls-san=127.0.0.1", + "--disable=traefik" + ] + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "kubeconfig": { + "type": "object", + "properties": { + "updateDefaultKubeconfig": { + "type": "boolean", + "default": true + }, + "switchCurrentContext": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": false + }, + "runtime": { + "type": "object", + "properties": { + "gpuRequest": { + "type": "string" + }, + "serversMemory": { + "type": "string" + }, + "agentsMemory": { + "type": "string" + } + } + } + }, + "additionalProperties": false + }, + "env": { + "type": "array", + "items": { + "type": "object", + "properties": { + "envVar": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "registries": { + "type": "object" + } + }, + "additionalProperties": false, + "definitions": { + "nodeFilters": { + "type": "array", + "items": { + "type": "string" + }, + "examples": [ + 
"loadbalancer", + "server[*]", + "server[0]", + "agent[1]", + "all" + ] + } + } +} \ No newline at end of file diff --git a/pkg/config/v1alpha3/types.go b/pkg/config/v1alpha3/types.go new file mode 100644 index 00000000..22465c71 --- /dev/null +++ b/pkg/config/v1alpha3/types.go @@ -0,0 +1,203 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ + +package v1alpha3 + +import ( + _ "embed" + "fmt" + "strings" + "time" + + config "github.com/rancher/k3d/v4/pkg/config/types" + k3d "github.com/rancher/k3d/v4/pkg/types" + "github.com/rancher/k3d/v4/version" +) + +const ApiVersion = "k3d.io/v1alpha3" + +// JSONSchema describes the schema used to validate config files +//go:embed schema.json +var JSONSchema string + +// DefaultConfigTpl for printing +const DefaultConfigTpl = `--- +apiVersion: k3d.io/v1alpha3 +kind: Simple +name: %s +servers: 1 +agents: 0 +image: %s +` + +// DefaultConfig templated DefaultConfigTpl +var DefaultConfig = fmt.Sprintf( + DefaultConfigTpl, + k3d.DefaultClusterName, + fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), +) + +type VolumeWithNodeFilters struct { + Volume string `mapstructure:"volume" yaml:"volume" json:"volume,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type PortWithNodeFilters struct { + Port string `mapstructure:"port" yaml:"port" json:"port,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type LabelWithNodeFilters struct { + Label string `mapstructure:"label" yaml:"label" json:"label,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type EnvVarWithNodeFilters struct { + EnvVar string `mapstructure:"envVar" yaml:"envVar" json:"envVar,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +type K3sArgWithNodeFilters struct { + Arg string `mapstructure:"arg" yaml:"arg" json:"arg,omitempty"` + NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"` +} + +// SimpleConfigOptionsKubeconfig describes the set of options referring to the kubeconfig during cluster creation. 
+type SimpleConfigOptionsKubeconfig struct { + UpdateDefaultKubeconfig bool `mapstructure:"updateDefaultKubeconfig" yaml:"updateDefaultKubeconfig" json:"updateDefaultKubeconfig,omitempty"` // default: true + SwitchCurrentContext bool `mapstructure:"switchCurrentContext" yaml:"switchCurrentContext" json:"switchCurrentContext,omitempty"` //nolint:lll // default: true +} + +type SimpleConfigOptions struct { + K3dOptions SimpleConfigOptionsK3d `mapstructure:"k3d" yaml:"k3d"` + K3sOptions SimpleConfigOptionsK3s `mapstructure:"k3s" yaml:"k3s"` + KubeconfigOptions SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` + Runtime SimpleConfigOptionsRuntime `mapstructure:"runtime" yaml:"runtime"` +} + +type SimpleConfigOptionsRuntime struct { + GPURequest string `mapstructure:"gpuRequest" yaml:"gpuRequest"` + ServersMemory string `mapstructure:"serversMemory" yaml:"serversMemory"` + AgentsMemory string `mapstructure:"agentsMemory" yaml:"agentsMemory"` +} + +type SimpleConfigOptionsK3d struct { + Wait bool `mapstructure:"wait" yaml:"wait"` + Timeout time.Duration `mapstructure:"timeout" yaml:"timeout"` + DisableLoadbalancer bool `mapstructure:"disableLoadbalancer" yaml:"disableLoadbalancer"` + DisableImageVolume bool `mapstructure:"disableImageVolume" yaml:"disableImageVolume"` + NoRollback bool `mapstructure:"disableRollback" yaml:"disableRollback"` + PrepDisableHostIPInjection bool `mapstructure:"disableHostIPInjection" yaml:"disableHostIPInjection"` + NodeHookActions []k3d.NodeHookAction `mapstructure:"nodeHookActions" yaml:"nodeHookActions,omitempty"` +} + +type SimpleConfigOptionsK3s struct { + ExtraArgs []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs"` +} + +// SimpleConfig describes the toplevel k3d configuration file. 
+type SimpleConfig struct { + config.TypeMeta `mapstructure:",squash" yaml:",inline"` + Name string `mapstructure:"name" yaml:"name" json:"name,omitempty"` + Servers int `mapstructure:"servers" yaml:"servers" json:"servers,omitempty"` //nolint:lll // default 1 + Agents int `mapstructure:"agents" yaml:"agents" json:"agents,omitempty"` //nolint:lll // default 0 + ExposeAPI SimpleExposureOpts `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"` + Image string `mapstructure:"image" yaml:"image" json:"image,omitempty"` + Network string `mapstructure:"network" yaml:"network" json:"network,omitempty"` + Subnet string `mapstructure:"subnet" yaml:"subnet" json:"subnet,omitempty"` + ClusterToken string `mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated + Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` + Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` + Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` + Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` + Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` + Registries struct { + Use []string `mapstructure:"use" yaml:"use,omitempty" json:"use,omitempty"` + Create bool `mapstructure:"create" yaml:"create,omitempty" json:"create,omitempty"` + Config string `mapstructure:"config" yaml:"config,omitempty" json:"config,omitempty"` // registries.yaml (k3s config for containerd registry override) + } `mapstructure:"registries" yaml:"registries,omitempty" json:"registries,omitempty"` +} + +// SimpleExposureOpts provides a simplified syntax compared to the original k3d.ExposureOpts +type SimpleExposureOpts struct { + Host string `mapstructure:"host" yaml:"host,omitempty" json:"host,omitempty"` + HostIP string `mapstructure:"hostIP" yaml:"hostIP,omitempty" 
json:"hostIP,omitempty"` + HostPort string `mapstructure:"hostPort" yaml:"hostPort,omitempty" json:"hostPort,omitempty"` +} + +// GetKind implements Config.GetKind +func (c SimpleConfig) GetKind() string { + return "Simple" +} + +func (c SimpleConfig) GetAPIVersion() string { + return ApiVersion +} + +// ClusterConfig describes a single cluster config +type ClusterConfig struct { + config.TypeMeta `mapstructure:",squash" yaml:",inline"` + Cluster k3d.Cluster `mapstructure:",squash" yaml:",inline"` + ClusterCreateOpts k3d.ClusterCreateOpts `mapstructure:"options" yaml:"options"` + KubeconfigOpts SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"` +} + +// GetKind implements Config.GetKind +func (c ClusterConfig) GetKind() string { + return "Simple" +} + +func (c ClusterConfig) GetAPIVersion() string { + return ApiVersion +} + +// ClusterListConfig describes a list of clusters +type ClusterListConfig struct { + config.TypeMeta `mapstructure:",squash" yaml:",inline"` + Clusters []k3d.Cluster `mapstructure:"clusters" yaml:"clusters"` +} + +func (c ClusterListConfig) GetKind() string { + return "Simple" +} + +func (c ClusterListConfig) GetAPIVersion() string { + return ApiVersion +} + +func GetConfigByKind(kind string) (config.Config, error) { + + // determine config kind + switch strings.ToLower(kind) { + case "simple": + return SimpleConfig{}, nil + case "cluster": + return ClusterConfig{}, nil + case "clusterlist": + return ClusterListConfig{}, nil + case "": + return nil, fmt.Errorf("missing `kind` in config file") + default: + return nil, fmt.Errorf("unknown `kind` '%s' in config file", kind) + } + +} diff --git a/pkg/config/validate.go b/pkg/config/validate.go index bb81ba41..c647e4f5 100644 --- a/pkg/config/validate.go +++ b/pkg/config/validate.go @@ -27,7 +27,7 @@ import ( "time" k3dc "github.com/rancher/k3d/v4/pkg/client" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" 
"github.com/rancher/k3d/v4/pkg/runtimes" runtimeutil "github.com/rancher/k3d/v4/pkg/runtimes/util" k3d "github.com/rancher/k3d/v4/pkg/types" diff --git a/pkg/config/validate_test.go b/pkg/config/validate_test.go index 45e736e3..88177808 100644 --- a/pkg/config/validate_test.go +++ b/pkg/config/validate_test.go @@ -26,7 +26,7 @@ import ( "context" "testing" - conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2" + conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/spf13/viper" ) diff --git a/pkg/types/types.go b/pkg/types/types.go index 31675511..23667855 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -184,8 +184,6 @@ type ClusterCreateOpts struct { WaitForServer bool `yaml:"waitForServer" json:"waitForServer,omitempty"` Timeout time.Duration `yaml:"timeout" json:"timeout,omitempty"` DisableLoadBalancer bool `yaml:"disableLoadbalancer" json:"disableLoadbalancer,omitempty"` - K3sServerArgs []string `yaml:"k3sServerArgs" json:"k3sServerArgs,omitempty"` - K3sAgentArgs []string `yaml:"k3sAgentArgs" json:"k3sAgentArgs,omitempty"` GPURequest string `yaml:"gpuRequest" json:"gpuRequest,omitempty"` ServersMemory string `yaml:"serversMemory" json:"serversMemory,omitempty"` AgentsMemory string `yaml:"agentsMemory" json:"agentsMemory,omitempty"` diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index 4d9f7255..c1d05e2b 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -1,4 +1,4 @@ -apiVersion: k3d.io/v1alpha2 +apiVersion: k3d.io/v1alpha3 kind: Simple name: test servers: 3 @@ -43,9 +43,10 @@ options: disableLoadbalancer: false disableImageVolume: false k3s: - extraServerArgs: - - --tls-san=127.0.0.1 - extraAgentArgs: [] + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - server[*] kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true \ No newline at end of file diff --git 
a/tests/assets/config_test_simple_migration_v1alpha2.yaml b/tests/assets/config_test_simple_migration_v1alpha2.yaml new file mode 100755 index 00000000..4d9f7255 --- /dev/null +++ b/tests/assets/config_test_simple_migration_v1alpha2.yaml @@ -0,0 +1,51 @@ +apiVersion: k3d.io/v1alpha2 +kind: Simple +name: test +servers: 3 +agents: 2 +kubeAPI: + hostIP: "0.0.0.0" + hostPort: "6446" +image: rancher/k3s:latest +volumes: + - volume: /my/path:/some/path + nodeFilters: + - all +ports: + - port: 80:80 + nodeFilters: + - loadbalancer + - port: 0.0.0.0:443:443 + nodeFilters: + - loadbalancer +env: + - envVar: bar=baz,bob + nodeFilters: + - all +labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer +registries: + create: true + use: [] + config: | + mirrors: + "my.company.registry": + endpoint: + - http://my.company.registry:5000 + +options: + k3d: + wait: true + timeout: "360s" # should be pretty high for multi-server clusters to allow for a proper startup routine + disableLoadbalancer: false + disableImageVolume: false + k3s: + extraServerArgs: + - --tls-san=127.0.0.1 + extraAgentArgs: [] + kubeconfig: + updateDefaultKubeconfig: true + switchCurrentContext: true \ No newline at end of file diff --git a/tests/assets/config_test_simple_migration_v1alpha3.yaml b/tests/assets/config_test_simple_migration_v1alpha3.yaml new file mode 100755 index 00000000..c1d05e2b --- /dev/null +++ b/tests/assets/config_test_simple_migration_v1alpha3.yaml @@ -0,0 +1,52 @@ +apiVersion: k3d.io/v1alpha3 +kind: Simple +name: test +servers: 3 +agents: 2 +kubeAPI: + hostIP: "0.0.0.0" + hostPort: "6446" +image: rancher/k3s:latest +volumes: + - volume: /my/path:/some/path + nodeFilters: + - all +ports: + - port: 80:80 + nodeFilters: + - loadbalancer + - port: 0.0.0.0:443:443 + nodeFilters: + - loadbalancer +env: + - envVar: bar=baz,bob + nodeFilters: + - all +labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer +registries: + create: true + use: [] + config: | + 
mirrors: + "my.company.registry": + endpoint: + - http://my.company.registry:5000 + +options: + k3d: + wait: true + timeout: "360s" # should be pretty high for multi-server clusters to allow for a proper startup routine + disableLoadbalancer: false + disableImageVolume: false + k3s: + extraArgs: + - arg: --tls-san=127.0.0.1 + nodeFilters: + - server[*] + kubeconfig: + updateDefaultKubeconfig: true + switchCurrentContext: true \ No newline at end of file diff --git a/tests/test_config_file_migration.sh b/tests/test_config_file_migration.sh new file mode 100755 index 00000000..8f4be093 --- /dev/null +++ b/tests/test_config_file_migration.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } + +# shellcheck source=./common.sh +source "$CURR_DIR/common.sh" + + +export CURRENT_STAGE="Test | config-file-migration" + + + +highlight "[START] ConfigMigrateTest" + +tempdir=$(mktemp -d) +$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha2.yaml" "$tempdir/expected.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple.yaml" +$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha3.yaml" "$tempdir/actual.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple_migrate.yaml" + +diff "$tempdir/actual.yaml" "$tempdir/expected.yaml" || failed "config migration failed" && passed "config migration succeeded" + + +highlight "[DONE] ConfigMigrateTest" + +exit 0 + + From bfead49c462ac6af652710123e3e22aa2806c080 Mon Sep 17 00:00:00 2001 From: ejose19 <8742215+ejose19@users.noreply.github.com> Date: Wed, 19 May 2021 09:20:47 -0300 Subject: [PATCH 06/45] [Enhancement] Refactoring: normalize label flags (k3s node & runtime) (#598, @ejose19) --- cmd/cluster/clusterCreate.go | 57 +++++++++++++++---- cmd/node/nodeCreate.go | 37 +++++++++--- cmd/node/nodeList.go | 2 +- 
cmd/registry/registryList.go | 4 +- cmd/util/runtimeLabels.go | 35 ++++++++++++ pkg/client/cluster.go | 40 ++++++------- pkg/client/kubeconfig.go | 8 +-- pkg/client/node.go | 40 ++++++------- pkg/client/registry.go | 10 ++-- pkg/config/config_test.go | 20 +++++-- .../test_assets/config_test_simple.yaml | 16 ++++-- .../config_test_simple_invalid_servers.yaml | 18 ++++-- pkg/config/transform.go | 44 ++++++++++---- pkg/config/v1alpha3/migrations.go | 9 +++ pkg/config/v1alpha3/schema.json | 52 ++++++++++------- pkg/config/v1alpha3/types.go | 11 ++-- pkg/runtimes/docker/container.go | 2 +- pkg/runtimes/docker/network.go | 2 +- pkg/runtimes/docker/node.go | 2 +- pkg/runtimes/docker/translate.go | 44 +++++++------- pkg/runtimes/docker/translate_test.go | 6 +- pkg/runtimes/docker/util.go | 2 +- pkg/runtimes/docker/volume.go | 4 +- pkg/tools/tools.go | 24 ++++---- pkg/types/types.go | 10 ++-- tests/assets/config_test_simple.yaml | 18 ++++-- ...config_test_simple_migration_v1alpha3.yaml | 13 +++-- tests/common.sh | 6 ++ tests/test_config_file.sh | 6 +- tests/test_config_with_overrides.sh | 4 ++ 30 files changed, 361 insertions(+), 185 deletions(-) create mode 100644 cmd/util/runtimeLabels.go diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index 574db83d..89a2fbae 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -283,8 +283,11 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") _ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port")) - cmd.Flags().StringArrayP("label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -l \"other.label=somevalue@server[0]\"`") - _ = 
ppViper.BindPFlag("cli.labels", cmd.Flags().Lookup("label")) + cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server[0]\"`") + _ = ppViper.BindPFlag("cli.k3s-node-labels", cmd.Flags().Lookup("k3s-node-label")) + + cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent[0,1]\" --runtime-label \"other.label=somevalue@server[0]\"`") + _ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label")) /* k3s */ cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server[0]\"") @@ -481,10 +484,10 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) { log.Tracef("PortFilterMap: %+v", portFilterMap) - // --label - // labelFilterMap will add container label to applied node filters - labelFilterMap := make(map[string][]string, 1) - for _, labelFlag := range ppViper.GetStringSlice("cli.labels") { + // --k3s-node-label + // k3sNodeLabelFilterMap will add k3s node label to applied node filters + k3sNodeLabelFilterMap := make(map[string][]string, 1) + for _, labelFlag := range ppViper.GetStringSlice("cli.k3s-node-labels") { // split node filter from the specified label label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag) @@ -493,21 +496,51 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) { } // create new entry or append filter to existing entry - if _, exists := labelFilterMap[label]; exists { - labelFilterMap[label] = append(labelFilterMap[label], nodeFilters...) 
+ if _, exists := k3sNodeLabelFilterMap[label]; exists { + k3sNodeLabelFilterMap[label] = append(k3sNodeLabelFilterMap[label], nodeFilters...) } else { - labelFilterMap[label] = nodeFilters + k3sNodeLabelFilterMap[label] = nodeFilters } } - for label, nodeFilters := range labelFilterMap { - cfg.Labels = append(cfg.Labels, conf.LabelWithNodeFilters{ + for label, nodeFilters := range k3sNodeLabelFilterMap { + cfg.Options.K3sOptions.NodeLabels = append(cfg.Options.K3sOptions.NodeLabels, conf.LabelWithNodeFilters{ Label: label, NodeFilters: nodeFilters, }) } - log.Tracef("LabelFilterMap: %+v", labelFilterMap) + log.Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap) + + // --runtime-label + // runtimeLabelFilterMap will add container runtime label to applied node filters + runtimeLabelFilterMap := make(map[string][]string, 1) + for _, labelFlag := range ppViper.GetStringSlice("cli.runtime-labels") { + + // split node filter from the specified label + label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag) + if err != nil { + log.Fatalln(err) + } + + cliutil.ValidateRuntimeLabelKey(strings.Split(label, "=")[0]) + + // create new entry or append filter to existing entry + if _, exists := runtimeLabelFilterMap[label]; exists { + runtimeLabelFilterMap[label] = append(runtimeLabelFilterMap[label], nodeFilters...) 
+ } else { + runtimeLabelFilterMap[label] = nodeFilters + } + } + + for label, nodeFilters := range runtimeLabelFilterMap { + cfg.Options.Runtime.Labels = append(cfg.Options.Runtime.Labels, conf.LabelWithNodeFilters{ + Label: label, + NodeFilters: nodeFilters, + }) + } + + log.Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap) // --env // envFilterMap will add container env vars to applied node filters diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index cc7c2b10..8169170c 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -30,6 +30,7 @@ import ( dockerunits "github.com/docker/go-units" "github.com/rancher/k3d/v4/cmd/util" + cliutil "github.com/rancher/k3d/v4/cmd/util" k3dc "github.com/rancher/k3d/v4/pkg/client" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -74,6 +75,7 @@ func NewCmdNodeCreate() *cobra.Command { cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for the node(s) to be ready before returning.") cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.") + cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"") cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"") // done @@ -127,9 +129,30 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl log.Errorf("Provided memory limit value is invalid") } + // --runtime-label + runtimeLabelsFlag, err := cmd.Flags().GetStringSlice("runtime-label") + if err != nil { + log.Errorln("No runtime-label specified") + log.Fatalln(err) + } + + runtimeLabels := make(map[string]string, len(runtimeLabelsFlag)+1) + for _, label := range runtimeLabelsFlag { + labelSplitted := strings.Split(label, "=") + if len(labelSplitted) != 2 { + log.Fatalf("unknown runtime-label format: %s, use format \"foo=bar\"",
label) + } + cliutil.ValidateRuntimeLabelKey(labelSplitted[0]) + runtimeLabels[labelSplitted[0]] = labelSplitted[1] + } + + // Internal k3d runtime labels take precedence over user-defined labels + runtimeLabels[k3d.LabelRole] = roleStr + + // --k3s-node-label k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label") if err != nil { - log.Errorln("No node-label specified") + log.Errorln("No k3s-node-label specified") log.Fatalln(err) } @@ -137,7 +160,7 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl for _, label := range k3sNodeLabelsFlag { labelSplitted := strings.Split(label, "=") if len(labelSplitted) != 2 { - log.Fatalf("unknown label format format: %s, use format \"foo=bar\"", label) + log.Fatalf("unknown k3s-node-label format: %s, use format \"foo=bar\"", label) } k3sNodeLabels[labelSplitted[0]] = labelSplitted[1] } @@ -146,13 +169,11 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl nodes := []*k3d.Node{} for i := 0; i < replicas; i++ { node := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i), - Role: role, - Image: image, - Labels: map[string]string{ - k3d.LabelRole: roleStr, - }, + Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i), + Role: role, + Image: image, K3sNodeLabels: k3sNodeLabels, + RuntimeLabels: runtimeLabels, Restart: true, Memory: memory, } diff --git a/cmd/node/nodeList.go b/cmd/node/nodeList.go index fd0698f8..c4abde01 100644 --- a/cmd/node/nodeList.go +++ b/cmd/node/nodeList.go @@ -88,7 +88,7 @@ func NewCmdNodeList() *cobra.Command { fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), string(node.Role), - node.Labels[k3d.LabelClusterName], + node.RuntimeLabels[k3d.LabelClusterName], node.State.Status) })) }, diff --git a/cmd/registry/registryList.go b/cmd/registry/registryList.go index f41d5f89..30fa66f0 100644 --- a/cmd/registry/registryList.go +++ 
b/cmd/registry/registryList.go @@ -88,8 +88,8 @@ func NewCmdRegistryList() *cobra.Command { util.PrintNodes(existingNodes, registryListFlags.output, headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) { cluster := "*" - if _, ok := node.Labels[k3d.LabelClusterName]; ok { - cluster = node.Labels[k3d.LabelClusterName] + if _, ok := node.RuntimeLabels[k3d.LabelClusterName]; ok { + cluster = node.RuntimeLabels[k3d.LabelClusterName] } fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), diff --git a/cmd/util/runtimeLabels.go b/cmd/util/runtimeLabels.go new file mode 100644 index 00000000..e603cef1 --- /dev/null +++ b/cmd/util/runtimeLabels.go @@ -0,0 +1,35 @@ +/* +Copyright © 2020 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ +package util + +import ( + "strings" + + log "github.com/sirupsen/logrus" +) + +// ValidateRuntimeLabelKey validates a given label key is not reserved for internal k3d usage +func ValidateRuntimeLabelKey(labelKey string) { + if strings.HasPrefix(labelKey, "k3s.") || strings.HasPrefix(labelKey, "k3d.") || labelKey == "app" { + log.Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey) + } +} diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 4aa29fcb..e04484c1 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -381,13 +381,13 @@ ClusterCreatOpts: nodeSetup := func(node *k3d.Node, suffix int) error { // cluster specific settings - if node.Labels == nil { - node.Labels = make(map[string]string) // TODO: maybe create an init function? + if node.RuntimeLabels == nil { + node.RuntimeLabels = make(map[string]string) // TODO: maybe create an init function? } // ensure global labels for k, v := range clusterCreateOpts.GlobalLabels { - node.Labels[k] = v + node.RuntimeLabels[k] = v } // ensure global env @@ -404,7 +404,7 @@ ClusterCreatOpts: cluster.Network.IPAM.IPsUsed = append(cluster.Network.IPAM.IPsUsed, ip) // make sure that we're not reusing the same IP next time node.IP.Static = true node.IP.IP = ip - node.Labels[k3d.LabelNodeStaticIP] = ip.String() + node.RuntimeLabels[k3d.LabelNodeStaticIP] = ip.String() } node.ServerOpts.KubeAPI = cluster.KubeAPI @@ -412,7 +412,7 @@ ClusterCreatOpts: // the cluster has an init server node, but its not this one, so connect it to the init node if cluster.InitNode != nil && !node.ServerOpts.IsInit { node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL)) - node.Labels[k3d.LabelServerIsInit] = "false" // set label, that this server node is not the init server + node.RuntimeLabels[k3d.LabelServerIsInit] = "false" // set label, that this server node is not the init server } } else if node.Role == k3d.AgentRole {
nil { log.Infoln("Creating initializing server node") cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init") - if cluster.InitNode.Labels == nil { - cluster.InitNode.Labels = map[string]string{} + if cluster.InitNode.RuntimeLabels == nil { + cluster.InitNode.RuntimeLabels = map[string]string{} } - cluster.InitNode.Labels[k3d.LabelServerIsInit] = "true" // set label, that this server node is the init server + cluster.InitNode.RuntimeLabels[k3d.LabelServerIsInit] = "true" // set label, that this server node is the init server // in case the LoadBalancer was disabled, expose the API Port on the initializing server node if clusterCreateOpts.DisableLoadBalancer { @@ -547,10 +547,10 @@ ClusterCreatOpts: fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), }, - Role: k3d.LoadBalancerRole, - Labels: clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels - Networks: []string{cluster.Network.Name}, - Restart: true, + Role: k3d.LoadBalancerRole, + RuntimeLabels: clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels + Networks: []string{cluster.Network.Name}, + Restart: true, } if len(udp_ports) > 0 { lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) @@ -673,7 +673,7 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus // ClusterList returns a list of all existing clusters func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) { log.Traceln("Listing Clusters...") - nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels) + nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels) if err != nil { log.Errorln("Failed to get clusters") return nil, err @@ -691,7 +691,7 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er log.Tracef("Found %d cluster-internal nodes", len(nodes)) if log.GetLevel() == 
log.TraceLevel { for _, node := range nodes { - log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.Labels[k3d.LabelClusterName]) + log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName]) } } @@ -700,7 +700,7 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er for _, node := range nodes { clusterExists := false for _, cluster := range clusters { - if node.Labels[k3d.LabelClusterName] == cluster.Name { // TODO: handle case, where this label doesn't exist + if node.RuntimeLabels[k3d.LabelClusterName] == cluster.Name { // TODO: handle case, where this label doesn't exist cluster.Nodes = append(cluster.Nodes, node) clusterExists = true break @@ -709,7 +709,7 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er // cluster is not in the list yet, so we add it with the current node as its first member if !clusterExists { clusters = append(clusters, &k3d.Cluster{ - Name: node.Labels[k3d.LabelClusterName], + Name: node.RuntimeLabels[k3d.LabelClusterName], Nodes: []*k3d.Node{node}, }) } @@ -734,7 +734,7 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error { // get the name of the cluster network if cluster.Network.Name == "" { - if networkName, ok := node.Labels[k3d.LabelNetwork]; ok { + if networkName, ok := node.RuntimeLabels[k3d.LabelNetwork]; ok { cluster.Network.Name = networkName } } @@ -742,7 +742,7 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error { // check if the network is external // since the struct value is a bool, initialized as false, we cannot check if it's unset if !cluster.Network.External && !networkExternalSet { - if networkExternalString, ok := node.Labels[k3d.LabelNetworkExternal]; ok { + if networkExternalString, ok := node.RuntimeLabels[k3d.LabelNetworkExternal]; ok { if networkExternal, err := 
strconv.ParseBool(networkExternalString); err == nil { cluster.Network.External = networkExternal networkExternalSet = true @@ -752,14 +752,14 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error { // get image volume // TODO: enable external image volumes the same way we do it with networks if cluster.ImageVolume == "" { - if imageVolumeName, ok := node.Labels[k3d.LabelImageVolume]; ok { + if imageVolumeName, ok := node.RuntimeLabels[k3d.LabelImageVolume]; ok { cluster.ImageVolume = imageVolumeName } } // get k3s cluster's token if cluster.Token == "" { - if token, ok := node.Labels[k3d.LabelClusterToken]; ok { + if token, ok := node.RuntimeLabels[k3d.LabelClusterToken]; ok { cluster.Token = token } } diff --git a/pkg/client/kubeconfig.go b/pkg/client/kubeconfig.go index 957f593d..a016f153 100644 --- a/pkg/client/kubeconfig.go +++ b/pkg/client/kubeconfig.go @@ -131,11 +131,11 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C APIHost := k3d.DefaultAPIHost for _, server := range serverNodes { - if _, ok := server.Labels[k3d.LabelServerAPIPort]; ok { + if _, ok := server.RuntimeLabels[k3d.LabelServerAPIPort]; ok { chosenServer = server - APIPort = server.Labels[k3d.LabelServerAPIPort] - if _, ok := server.Labels[k3d.LabelServerAPIHost]; ok { - APIHost = server.Labels[k3d.LabelServerAPIHost] + APIPort = server.RuntimeLabels[k3d.LabelServerAPIPort] + if _, ok := server.RuntimeLabels[k3d.LabelServerAPIHost]; ok { + APIHost = server.RuntimeLabels[k3d.LabelServerAPIHost] } break } diff --git a/pkg/client/node.go b/pkg/client/node.go index ebbf28f0..a00f4666 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -59,8 +59,8 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N node.Networks = []string{cluster.Network.Name} // skeleton - if node.Labels == nil { - node.Labels = map[string]string{ + if node.RuntimeLabels == nil { + node.RuntimeLabels = map[string]string{ k3d.LabelRole: 
string(node.Role), } } @@ -141,7 +141,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N } } if !k3sURLFound { - if url, ok := node.Labels[k3d.LabelClusterURL]; ok { + if url, ok := node.RuntimeLabels[k3d.LabelClusterURL]; ok { node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", url)) } else { log.Warnln("Failed to find K3S_URL value!") @@ -381,18 +381,22 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c // ### Labels ### labels := make(map[string]string) - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { labels[k] = v } - for k, v := range k3d.DefaultObjectLabelsVar { + for k, v := range k3d.DefaultRuntimeLabelsVar { labels[k] = v } - for k, v := range node.Labels { + for k, v := range node.RuntimeLabels { labels[k] = v } - node.Labels = labels + node.RuntimeLabels = labels // second most important: the node role label - node.Labels[k3d.LabelRole] = string(node.Role) + node.RuntimeLabels[k3d.LabelRole] = string(node.Role) + + for k, v := range node.K3sNodeLabels { + node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) + } // ### Environment ### node.Env = append(node.Env, k3d.DefaultNodeEnv...) 
// append default node env vars @@ -469,7 +473,7 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o // update the server loadbalancer if !opts.SkipLBUpdate && (node.Role == k3d.ServerRole || node.Role == k3d.AgentRole) { - cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]}) + cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.RuntimeLabels[k3d.LabelClusterName]}) if err != nil { log.Errorf("Failed to find cluster for node '%s'", node.Name) return err @@ -493,10 +497,6 @@ func patchAgentSpec(node *k3d.Node) error { node.Cmd = []string{"agent"} } - for k, v := range node.K3sNodeLabels { - node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) - } - return nil } @@ -509,9 +509,9 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error { // Add labels and TLS SAN for the exposed API // FIXME: For now, the labels concerning the API on the server nodes are only being used for configuring the kubeconfig - node.Labels[k3d.LabelServerAPIHostIP] = node.ServerOpts.KubeAPI.Binding.HostIP // TODO: maybe get docker machine IP here - node.Labels[k3d.LabelServerAPIHost] = node.ServerOpts.KubeAPI.Host - node.Labels[k3d.LabelServerAPIPort] = node.ServerOpts.KubeAPI.Binding.HostPort + node.RuntimeLabels[k3d.LabelServerAPIHostIP] = node.ServerOpts.KubeAPI.Binding.HostIP // TODO: maybe get docker machine IP here + node.RuntimeLabels[k3d.LabelServerAPIHost] = node.ServerOpts.KubeAPI.Host + node.RuntimeLabels[k3d.LabelServerAPIPort] = node.ServerOpts.KubeAPI.Binding.HostPort // If the runtime is docker, attempt to use the docker host if runtime == runtimes.Docker { @@ -519,19 +519,19 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error { if dockerHost != "" { dockerHost = strings.Split(dockerHost, ":")[0] // remove the port log.Tracef("Using docker host %s", dockerHost) - node.Labels[k3d.LabelServerAPIHostIP] = dockerHost - 
node.Labels[k3d.LabelServerAPIHost] = dockerHost + node.RuntimeLabels[k3d.LabelServerAPIHostIP] = dockerHost + node.RuntimeLabels[k3d.LabelServerAPIHost] = dockerHost } } - node.Args = append(node.Args, "--tls-san", node.Labels[k3d.LabelServerAPIHost]) // add TLS SAN for non default host name + node.Args = append(node.Args, "--tls-san", node.RuntimeLabels[k3d.LabelServerAPIHost]) // add TLS SAN for non default host name return nil } // NodeList returns a list of all existing clusters func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) { - nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels) + nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels) if err != nil { log.Errorln("Failed to get nodes") return nil, err diff --git a/pkg/client/registry.go b/pkg/client/registry.go index 4df7ff7a..9e1dc86c 100644 --- a/pkg/client/registry.go +++ b/pkg/client/registry.go @@ -77,7 +77,7 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi } // setup the node labels - registryNode.Labels = map[string]string{ + registryNode.RuntimeLabels = map[string]string{ k3d.LabelClusterName: reg.ClusterRef, k3d.LabelRole: string(k3d.RegistryRole), k3d.LabelRegistryHost: reg.ExposureOpts.Host, // TODO: docker machine host? 
@@ -85,11 +85,11 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi k3d.LabelRegistryPortExternal: reg.ExposureOpts.Binding.HostPort, k3d.LabelRegistryPortInternal: reg.ExposureOpts.Port.Port(), } - for k, v := range k3d.DefaultObjectLabels { - registryNode.Labels[k] = v + for k, v := range k3d.DefaultRuntimeLabels { + registryNode.RuntimeLabels[k] = v } - for k, v := range k3d.DefaultObjectLabelsVar { - registryNode.Labels[k] = v + for k, v := range k3d.DefaultRuntimeLabelsVar { + registryNode.RuntimeLabels[k] = v } // port diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index eb15f152..41d76422 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -64,12 +64,6 @@ func TestReadSimpleConfig(t *testing.T) { NodeFilters: []string{"loadbalancer"}, }, }, - Labels: []conf.LabelWithNodeFilters{ - { - Label: "foo=bar", - NodeFilters: []string{"server[0]", "loadbalancer"}, - }, - }, Env: []conf.EnvVarWithNodeFilters{ { EnvVar: "bar=baz", @@ -90,11 +84,25 @@ func TestReadSimpleConfig(t *testing.T) { NodeFilters: []string{"server[*]"}, }, }, + NodeLabels: []conf.LabelWithNodeFilters{ + { + Label: "foo=bar", + NodeFilters: []string{"server[0]", "loadbalancer"}, + }, + }, }, KubeconfigOptions: conf.SimpleConfigOptionsKubeconfig{ UpdateDefaultKubeconfig: true, SwitchCurrentContext: true, }, + Runtime: conf.SimpleConfigOptionsRuntime{ + Labels: []conf.LabelWithNodeFilters{ + { + Label: "foo=bar", + NodeFilters: []string{"server[0]", "loadbalancer"}, + }, + }, + }, }, } diff --git a/pkg/config/test_assets/config_test_simple.yaml b/pkg/config/test_assets/config_test_simple.yaml index 4e132176..f8f873cb 100644 --- a/pkg/config/test_assets/config_test_simple.yaml +++ b/pkg/config/test_assets/config_test_simple.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - "server[0]" - - loadbalancer options: k3d: @@ -39,6 +34,17 @@ options: - arg: 
--tls-san=127.0.0.1 nodeFilters: - "server[*]" + nodeLabels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml index b9e75fb6..7b9bc8a0 100644 --- a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml +++ b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - server[0] - - loadbalancer options: k3d: @@ -39,6 +34,17 @@ options: - arg: --tls-san=127.0.0.1 nodeFilters: - "server[*]" + nodeLabels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 0ec43686..59e674d9 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -190,23 +190,47 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } } - // -> LABELS - for _, labelWithNodeFilters := range simpleConfig.Labels { - if len(labelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - return nil, fmt.Errorf("Labelmapping '%s' lacks a node filter, but there's more than one node", labelWithNodeFilters.Label) + // -> K3S NODE LABELS + for _, k3sNodeLabelWithNodeFilters := range simpleConfig.Options.K3sOptions.NodeLabels { + if len(k3sNodeLabelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + return nil, fmt.Errorf("K3sNodeLabelmapping '%s' lacks a node filter, but there's more than one node", k3sNodeLabelWithNodeFilters.Label) } - nodes, err := 
util.FilterNodes(nodeList, labelWithNodeFilters.NodeFilters) + nodes, err := util.FilterNodes(nodeList, k3sNodeLabelWithNodeFilters.NodeFilters) if err != nil { return nil, err } for _, node := range nodes { - if node.Labels == nil { - node.Labels = make(map[string]string) // ensure that the map is initialized + if node.K3sNodeLabels == nil { + node.K3sNodeLabels = make(map[string]string) // ensure that the map is initialized } - k, v := util.SplitLabelKeyValue(labelWithNodeFilters.Label) - node.Labels[k] = v + k, v := util.SplitLabelKeyValue(k3sNodeLabelWithNodeFilters.Label) + node.K3sNodeLabels[k] = v + + } + } + + // -> RUNTIME LABELS + for _, runtimeLabelWithNodeFilters := range simpleConfig.Options.Runtime.Labels { + if len(runtimeLabelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { + return nil, fmt.Errorf("RuntimeLabelmapping '%s' lacks a node filter, but there's more than one node", runtimeLabelWithNodeFilters.Label) + } + + nodes, err := util.FilterNodes(nodeList, runtimeLabelWithNodeFilters.NodeFilters) + if err != nil { + return nil, err + } + + for _, node := range nodes { + if node.RuntimeLabels == nil { + node.RuntimeLabels = make(map[string]string) // ensure that the map is initialized + } + k, v := util.SplitLabelKeyValue(runtimeLabelWithNodeFilters.Label) + + cliutil.ValidateRuntimeLabelKey(k) + + node.RuntimeLabels[k] = v } } @@ -260,7 +284,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } // ensure, that we have the default object labels - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { clusterCreateOpts.GlobalLabels[k] = v } diff --git a/pkg/config/v1alpha3/migrations.go b/pkg/config/v1alpha3/migrations.go index 5c894ca1..fd1bc6dc 100644 --- a/pkg/config/v1alpha3/migrations.go +++ b/pkg/config/v1alpha3/migrations.go @@ -49,6 +49,15 @@ func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { return nil, err } + cfg.Options.Runtime.Labels = 
[]LabelWithNodeFilters{} + + for _, label := range input.(v1alpha2.SimpleConfig).Labels { + cfg.Options.Runtime.Labels = append(cfg.Options.Runtime.Labels, LabelWithNodeFilters{ + Label: label.Label, + NodeFilters: label.NodeFilters, + }) + } + cfg.Options.K3sOptions.ExtraArgs = []K3sArgWithNodeFilters{} for _, arg := range input.(v1alpha2.SimpleConfig).Options.K3sOptions.ExtraServerArgs { diff --git a/pkg/config/v1alpha3/schema.json b/pkg/config/v1alpha3/schema.json index 2b07c02c..1deb0060 100644 --- a/pkg/config/v1alpha3/schema.json +++ b/pkg/config/v1alpha3/schema.json @@ -108,21 +108,6 @@ "additionalProperties": false } }, - "labels": { - "type": "array", - "items": { - "type": "object", - "properties": { - "label": { - "type": "string" - }, - "nodeFilters": { - "$ref": "#/definitions/nodeFilters" - } - }, - "additionalProperties": false - } - }, "options": { "type": "object", "properties": { @@ -170,12 +155,24 @@ "properties": { "arg": { "type": "string", - "examples": [ - "--tls-san=127.0.0.1", - "--disable=traefik" - ] + "examples": ["--tls-san=127.0.0.1", "--disable=traefik"] }, - "nodeFilters": { + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } + }, + "nodeLabels": { + "type": "array", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string" + }, + "nodeFilters": { "$ref": "#/definitions/nodeFilters" } }, @@ -210,6 +207,21 @@ }, "agentsMemory": { "type": "string" + }, + "labels": { + "type": "array", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string" + }, + "nodeFilters": { + "$ref": "#/definitions/nodeFilters" + } + }, + "additionalProperties": false + } } } } diff --git a/pkg/config/v1alpha3/types.go b/pkg/config/v1alpha3/types.go index 22465c71..9783d1c1 100644 --- a/pkg/config/v1alpha3/types.go +++ b/pkg/config/v1alpha3/types.go @@ -95,9 +95,10 @@ type SimpleConfigOptions struct { } type SimpleConfigOptionsRuntime struct { - GPURequest string 
`mapstructure:"gpuRequest" yaml:"gpuRequest"` - ServersMemory string `mapstructure:"serversMemory" yaml:"serversMemory"` - AgentsMemory string `mapstructure:"agentsMemory" yaml:"agentsMemory"` + GPURequest string `mapstructure:"gpuRequest" yaml:"gpuRequest"` + ServersMemory string `mapstructure:"serversMemory" yaml:"serversMemory"` + AgentsMemory string `mapstructure:"agentsMemory" yaml:"agentsMemory"` + Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels"` } type SimpleConfigOptionsK3d struct { @@ -111,7 +112,8 @@ type SimpleConfigOptionsK3d struct { } type SimpleConfigOptionsK3s struct { - ExtraArgs []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs"` + ExtraArgs []K3sArgWithNodeFilters `mapstructure:"extraArgs" yaml:"extraArgs"` + NodeLabels []LabelWithNodeFilters `mapstructure:"nodeLabels" yaml:"nodeLabels"` } // SimpleConfig describes the toplevel k3d configuration file. @@ -127,7 +129,6 @@ type SimpleConfig struct { ClusterToken string `mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"` Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"` - Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"` Options SimpleConfigOptions `mapstructure:"options" yaml:"options" json:"options,omitempty"` Env []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"` Registries struct { diff --git a/pkg/runtimes/docker/container.go b/pkg/runtimes/docker/container.go index 6124d4e3..296c3070 100644 --- a/pkg/runtimes/docker/container.go +++ b/pkg/runtimes/docker/container.go @@ -151,7 +151,7 @@ func getNodeContainer(ctx context.Context, node *k3d.Node) (*types.Container, er // (1) list containers which have the default k3d labels attached filters := filters.NewArgs() - for k, v := range node.Labels { + for k, v := range 
node.RuntimeLabels { filters.Add("label", fmt.Sprintf("%s=%s", k, v)) } diff --git a/pkg/runtimes/docker/network.go b/pkg/runtimes/docker/network.go index 80f62f4c..b1661233 100644 --- a/pkg/runtimes/docker/network.go +++ b/pkg/runtimes/docker/network.go @@ -147,7 +147,7 @@ func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, inNet *k3d.Cluste // (3) Create a new network netCreateOpts := types.NetworkCreate{ CheckDuplicate: true, - Labels: k3d.DefaultObjectLabels, + Labels: k3d.DefaultRuntimeLabels, } // we want a managed (user-defined) network, but user didn't specify a subnet, so we try to auto-generate one diff --git a/pkg/runtimes/docker/node.go b/pkg/runtimes/docker/node.go index b5e3b39a..c7c3b37d 100644 --- a/pkg/runtimes/docker/node.go +++ b/pkg/runtimes/docker/node.go @@ -178,7 +178,7 @@ func getContainersByLabel(ctx context.Context, labels map[string]string) ([]type // (1) list containers which have the default k3d labels attached filters := filters.NewArgs() - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { filters.Add("label", fmt.Sprintf("%s=%s", k, v)) } for k, v := range labels { diff --git a/pkg/runtimes/docker/translate.go b/pkg/runtimes/docker/translate.go index b6861aad..c8331c75 100644 --- a/pkg/runtimes/docker/translate.go +++ b/pkg/runtimes/docker/translate.go @@ -73,7 +73,7 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) { containerConfig.Env = node.Env /* Labels */ - containerConfig.Labels = node.Labels // has to include the role + containerConfig.Labels = node.RuntimeLabels // has to include the role /* Auto-Restart */ if node.Restart { @@ -162,10 +162,10 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) { // TranslateContainerToNode translates a docker container object into a k3d node representation func TranslateContainerToNode(cont *types.Container) (*k3d.Node, error) { node := &k3d.Node{ - Name: strings.TrimPrefix(cont.Names[0], "/"), // 
container name with leading '/' cut off - Image: cont.Image, - Labels: cont.Labels, - Role: k3d.NodeRoles[cont.Labels[k3d.LabelRole]], + Name: strings.TrimPrefix(cont.Names[0], "/"), // container name with leading '/' cut off + Image: cont.Image, + RuntimeLabels: cont.Labels, + Role: k3d.NodeRoles[cont.Labels[k3d.LabelRole]], // TODO: all the rest } return node, nil @@ -175,7 +175,7 @@ func TranslateContainerToNode(cont *types.Container) (*k3d.Node, error) { func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d.Node, error) { // first, make sure, that it's actually a k3d managed container by checking if it has all the default labels - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { log.Tracef("TranslateContainerDetailsToNode: Checking for default object label %s=%s on container %s", k, v, containerDetails.Name) found := false for lk, lv := range containerDetails.Config.Labels { @@ -273,22 +273,22 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d } node := &k3d.Node{ - Name: strings.TrimPrefix(containerDetails.Name, "/"), // container name with leading '/' cut off - Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]], - Image: containerDetails.Image, - Volumes: containerDetails.HostConfig.Binds, - Env: env, - Cmd: containerDetails.Config.Cmd, - Args: []string{}, // empty, since Cmd already contains flags - Ports: containerDetails.HostConfig.PortBindings, - Restart: restart, - Created: containerDetails.Created, - Labels: labels, - Networks: orderedNetworks, - ServerOpts: serverOpts, - AgentOpts: k3d.AgentOpts{}, - State: nodeState, - Memory: memoryStr, + Name: strings.TrimPrefix(containerDetails.Name, "/"), // container name with leading '/' cut off + Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]], + Image: containerDetails.Image, + Volumes: containerDetails.HostConfig.Binds, + Env: env, + Cmd: containerDetails.Config.Cmd, + Args: 
[]string{}, // empty, since Cmd already contains flags + Ports: containerDetails.HostConfig.PortBindings, + Restart: restart, + Created: containerDetails.Created, + RuntimeLabels: labels, + Networks: orderedNetworks, + ServerOpts: serverOpts, + AgentOpts: k3d.AgentOpts{}, + State: nodeState, + Memory: memoryStr, } return node, nil } diff --git a/pkg/runtimes/docker/translate_test.go b/pkg/runtimes/docker/translate_test.go index 38c6f3a2..4243dc57 100644 --- a/pkg/runtimes/docker/translate_test.go +++ b/pkg/runtimes/docker/translate_test.go @@ -52,9 +52,9 @@ func TestTranslateNodeToContainer(t *testing.T) { }, }, }, - Restart: true, - Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"}, - Networks: []string{"mynet"}, + Restart: true, + RuntimeLabels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"}, + Networks: []string{"mynet"}, } init := true diff --git a/pkg/runtimes/docker/util.go b/pkg/runtimes/docker/util.go index 7720c1e5..f64c39a2 100644 --- a/pkg/runtimes/docker/util.go +++ b/pkg/runtimes/docker/util.go @@ -44,7 +44,7 @@ import ( // GetDefaultObjectLabelsFilter returns docker type filters created from k3d labels func GetDefaultObjectLabelsFilter(clusterName string) filters.Args { filters := filters.NewArgs() - for key, value := range k3d.DefaultObjectLabels { + for key, value := range k3d.DefaultRuntimeLabels { filters.Add("label", fmt.Sprintf("%s=%s", key, value)) } filters.Add("label", fmt.Sprintf("%s=%s", k3d.LabelClusterName, clusterName)) diff --git a/pkg/runtimes/docker/volume.go b/pkg/runtimes/docker/volume.go index 6c0f4c80..32d39634 100644 --- a/pkg/runtimes/docker/volume.go +++ b/pkg/runtimes/docker/volume.go @@ -49,10 +49,10 @@ func (d Docker) CreateVolume(ctx context.Context, name string, labels map[string DriverOpts: map[string]string{}, } - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { volumeCreateOptions.Labels[k] = v } - 
for k, v := range k3d.DefaultObjectLabelsVar { + for k, v := range k3d.DefaultRuntimeLabelsVar { volumeCreateOptions.Labels[k] = v } diff --git a/pkg/tools/tools.go b/pkg/tools/tools.go index 56866ed3..62e68fd9 100644 --- a/pkg/tools/tools.go +++ b/pkg/tools/tools.go @@ -65,7 +65,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, var ok bool for _, node := range cluster.Nodes { if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole { - if imageVolume, ok = node.Labels[k3d.LabelImageVolume]; ok { + if imageVolume, ok = node.RuntimeLabels[k3d.LabelImageVolume]; ok { break } } @@ -276,23 +276,23 @@ func containsVersionPart(imageTag string) bool { // runToolsNode will start a new k3d tools container and connect it to the network of the chosen cluster func runToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, network string, volumes []string) (*k3d.Node, error) { labels := map[string]string{} - for k, v := range k3d.DefaultObjectLabels { + for k, v := range k3d.DefaultRuntimeLabels { labels[k] = v } - for k, v := range k3d.DefaultObjectLabelsVar { + for k, v := range k3d.DefaultRuntimeLabelsVar { labels[k] = v } node := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name), - Image: fmt.Sprintf("%s:%s", k3d.DefaultToolsImageRepo, version.GetHelperImageVersion()), - Role: k3d.NoRole, - Volumes: volumes, - Networks: []string{network}, - Cmd: []string{}, - Args: []string{"noop"}, - Labels: k3d.DefaultObjectLabels, + Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name), + Image: fmt.Sprintf("%s:%s", k3d.DefaultToolsImageRepo, version.GetHelperImageVersion()), + Role: k3d.NoRole, + Volumes: volumes, + Networks: []string{network}, + Cmd: []string{}, + Args: []string{"noop"}, + RuntimeLabels: k3d.DefaultRuntimeLabels, } - node.Labels[k3d.LabelClusterName] = cluster.Name + node.RuntimeLabels[k3d.LabelClusterName] = cluster.Name if err := 
k3dc.NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil { log.Errorf("Failed to create tools container for cluster '%s'", cluster.Name) return node, err diff --git a/pkg/types/types.go b/pkg/types/types.go index 23667855..4df5e434 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -105,13 +105,13 @@ var ClusterExternalNodeRoles = []Role{ RegistryRole, } -// DefaultObjectLabels specifies a set of labels that will be attached to k3d objects by default -var DefaultObjectLabels = map[string]string{ +// DefaultRuntimeLabels specifies a set of labels that will be attached to k3d runtime objects by default +var DefaultRuntimeLabels = map[string]string{ "app": "k3d", } -// DefaultObjectLabelsVar specifies a set of labels that will be attached to k3d objects by default but are not static (e.g. across k3d versions) -var DefaultObjectLabelsVar = map[string]string{ +// DefaultRuntimeLabelsVar specifies a set of labels that will be attached to k3d runtime objects by default but are not static (e.g. 
across k3d versions) +var DefaultRuntimeLabelsVar = map[string]string{ "k3d.version": version.GetVersion(), } @@ -339,7 +339,7 @@ type Node struct { Ports nat.PortMap `yaml:"portMappings" json:"portMappings,omitempty"` Restart bool `yaml:"restart" json:"restart,omitempty"` Created string `yaml:"created" json:"created,omitempty"` - Labels map[string]string // filled automatically + RuntimeLabels map[string]string `yaml:"runtimeLabels" json:"runtimeLabels,omitempty"` K3sNodeLabels map[string]string `yaml:"k3sNodeLabels" json:"k3sNodeLabels,omitempty"` Networks []string // filled automatically ExtraHosts []string // filled automatically diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index c1d05e2b..75588924 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz,bob nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - server[0] - - loadbalancer registries: create: true use: [] @@ -47,6 +42,17 @@ options: - arg: --tls-san=127.0.0.1 nodeFilters: - server[*] + nodeLabels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/tests/assets/config_test_simple_migration_v1alpha3.yaml b/tests/assets/config_test_simple_migration_v1alpha3.yaml index c1d05e2b..98eccbde 100755 --- a/tests/assets/config_test_simple_migration_v1alpha3.yaml +++ b/tests/assets/config_test_simple_migration_v1alpha3.yaml @@ -22,11 +22,6 @@ env: - envVar: bar=baz,bob nodeFilters: - all -labels: - - label: foo=bar - nodeFilters: - - server[0] - - loadbalancer registries: create: true use: [] @@ -49,4 +44,10 @@ options: - server[*] kubeconfig: updateDefaultKubeconfig: true - switchCurrentContext: true \ No newline at end of file + 
switchCurrentContext: true + runtime: + labels: + - label: foo=bar + nodeFilters: + - server[0] + - loadbalancer diff --git a/tests/common.sh b/tests/common.sh index f1529fbb..4d4de061 100755 --- a/tests/common.sh +++ b/tests/common.sh @@ -175,4 +175,10 @@ docker_assert_container_label() { # $1 = container/node name # $2 = label to assert docker inspect --format '{{ range $k, $v := .Config.Labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' "$1" | grep -E "^$2$" +} + +k3s_assert_node_label() { + # $1 = node name + # $2 = label to assert + kubectl get node "$1" --output go-template='{{ range $k, $v := .metadata.labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' | grep -E "^$2$" } \ No newline at end of file diff --git a/tests/test_config_file.sh b/tests/test_config_file.sh index b367bac1..8793eb44 100755 --- a/tests/test_config_file.sh +++ b/tests/test_config_file.sh @@ -18,7 +18,7 @@ fi export CURRENT_STAGE="Test | config-file | $K3S_IMAGE_TAG" -clustername="ConfigTest" +clustername="configtest" highlight "[START] ConfigTest $EXTRA_TITLE" @@ -45,6 +45,10 @@ exec_in_node "k3d-$clustername-server-0" "env" | grep "bar=baz,bob" || failed "E info "Ensuring that container labels have been set as stated in the config" docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0" +## K3s Node Labels +info "Ensuring that k3s node labels have been set as stated in the config" +k3s_assert_node_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on node k3d-$clustername-server-0" + ## Registry Node info "Ensuring, that we have a registry node present" $EXE node list "k3d-$clustername-registry" || failed "Expected k3d-$clustername-registry to be present" diff --git a/tests/test_config_with_overrides.sh b/tests/test_config_with_overrides.sh index 294491b1..6b705a06 100755 --- a/tests/test_config_with_overrides.sh +++ 
b/tests/test_config_with_overrides.sh @@ -44,6 +44,10 @@ exec_in_node "k3d-$clustername-agent-1" "env" | grep "x=y" || failed "Expected e info "Ensuring that container labels have been set as stated in the config" docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0" +## K3s Node Labels +info "Ensuring that k3s node labels have been set as stated in the config" +k3s_assert_node_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on node k3d-$clustername-server-0" + ## Registry Node info "Ensuring, that we DO NOT have a registry node present" $EXE node list "k3d-$clustername-registry" && failed "Expected k3d-$clustername-registry to NOT be present" From 77aa76d7f24baa535ea742f3992f52e88bcfa357 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Wed, 16 Jun 2021 15:59:43 +0200 Subject: [PATCH 07/45] [FEATURE] add ability to add ports to an existing loadbalancer (#615) --- cmd/node/node.go | 1 + cmd/node/nodeEdit.go | 113 ++++ go.mod | 1 + go.sum | 4 + pkg/client/node.go | 150 +++++ pkg/runtimes/docker/node.go | 18 + pkg/runtimes/docker/translate.go | 10 +- pkg/runtimes/runtime.go | 1 + pkg/util/ports.go | 19 + tests/test_node_edit.sh | 39 ++ .../mitchellh/copystructure/LICENSE | 21 + .../mitchellh/copystructure/README.md | 21 + .../mitchellh/copystructure/copier_time.go | 15 + .../mitchellh/copystructure/copystructure.go | 631 ++++++++++++++++++ .../github.com/mitchellh/copystructure/go.mod | 5 + .../github.com/mitchellh/copystructure/go.sum | 2 + .../mitchellh/reflectwalk/.travis.yml | 1 + .../github.com/mitchellh/reflectwalk/LICENSE | 21 + .../mitchellh/reflectwalk/README.md | 6 + .../github.com/mitchellh/reflectwalk/go.mod | 1 + .../mitchellh/reflectwalk/location.go | 19 + .../mitchellh/reflectwalk/location_string.go | 16 + .../mitchellh/reflectwalk/reflectwalk.go | 420 ++++++++++++ vendor/modules.txt | 5 + 24 files 
changed, 1531 insertions(+), 9 deletions(-) create mode 100644 cmd/node/nodeEdit.go create mode 100755 tests/test_node_edit.sh create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE create mode 100644 vendor/github.com/mitchellh/copystructure/README.md create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go create mode 100644 vendor/github.com/mitchellh/copystructure/go.mod create mode 100644 vendor/github.com/mitchellh/copystructure/go.sum create mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md create mode 100644 vendor/github.com/mitchellh/reflectwalk/go.mod create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go diff --git a/cmd/node/node.go b/cmd/node/node.go index 5266e150..5d132fbe 100644 --- a/cmd/node/node.go +++ b/cmd/node/node.go @@ -48,6 +48,7 @@ func NewCmdNode() *cobra.Command { cmd.AddCommand(NewCmdNodeStop()) cmd.AddCommand(NewCmdNodeDelete()) cmd.AddCommand(NewCmdNodeList()) + cmd.AddCommand(NewCmdNodeEdit()) // add flags diff --git a/cmd/node/nodeEdit.go b/cmd/node/nodeEdit.go new file mode 100644 index 00000000..e723f43e --- /dev/null +++ b/cmd/node/nodeEdit.go @@ -0,0 +1,113 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package node + +import ( + "github.com/docker/go-connections/nat" + "github.com/rancher/k3d/v4/cmd/util" + "github.com/rancher/k3d/v4/pkg/client" + "github.com/rancher/k3d/v4/pkg/runtimes" + k3d "github.com/rancher/k3d/v4/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +// NewCmdNodeEdit returns a new cobra command +func NewCmdNodeEdit() *cobra.Command { + + // create new cobra command + cmd := &cobra.Command{ + Use: "edit NAME", + Short: "[EXPERIMENTAL] Edit node(s).", + Long: `[EXPERIMENTAL] Edit node(s).`, + Args: cobra.ExactArgs(1), + Aliases: []string{"update"}, + ValidArgsFunction: util.ValidArgsAvailableNodes, + Run: func(cmd *cobra.Command, args []string) { + + existingNode, changeset := parseEditNodeCmd(cmd, args) + + log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset) + + if err := client.NodeEdit(cmd.Context(), runtimes.SelectedRuntime, existingNode, changeset); err != nil { + log.Fatalln(err) + } + + log.Infof("Successfully updated %s", existingNode.Name) + + }, + } + + // add subcommands + + // add flags + cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] (serverlb only!) 
Map ports from the node container to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`") + + // done + return cmd +} + +// parseEditNodeCmd parses the command input into variables required to delete nodes +func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node) { + + existingNode, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: args[0]}) + if err != nil { + log.Fatalln(err) + } + + if existingNode == nil { + log.Infof("Node %s not found", args[0]) + return nil, nil + } + + if existingNode.Role != k3d.LoadBalancerRole { + log.Fatalln("Currently only the loadbalancer can be updated!") + } + + changeset := &k3d.Node{} + + /* + * --port-add + */ + portFlags, err := cmd.Flags().GetStringArray("port-add") + if err != nil { + log.Errorln(err) + return nil, nil + } + + // init portmap + changeset.Ports = nat.PortMap{} + + for _, flag := range portFlags { + + portmappings, err := nat.ParsePortSpec(flag) + if err != nil { + log.Fatalf("Failed to parse port spec '%s': %+v", flag, err) + } + + for _, pm := range portmappings { + changeset.Ports[pm.Port] = append(changeset.Ports[pm.Port], pm.Binding) + } + } + + return existingNode, changeset +} diff --git a/go.mod b/go.mod index 74b0a384..6893fcd8 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5 github.com/imdario/mergo v0.3.12 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de + github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/go-homedir v1.1.0 github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf // indirect diff --git a/go.sum b/go.sum index 47bcb66f..879f84b4 100644 --- a/go.sum +++ b/go.sum @@ -368,6 +368,8 @@ github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpe 
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -380,6 +382,8 @@ github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= diff --git a/pkg/client/node.go b/pkg/client/node.go index a00f4666..019a901b 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -33,6 +33,9 @@ import ( "strings" "time" + copystruct "github.com/mitchellh/copystructure" + + "github.com/docker/go-connections/nat" dockerunits "github.com/docker/go-units" "github.com/imdario/mergo" "github.com/rancher/k3d/v4/pkg/actions" @@ -639,3 +642,150 @@ nodeLoop: return 
resultList } + +// NodeEdit lets you update an existing node +func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, changeset *k3d.Node) error { + + /* + * Make a deep copy of the existing node + */ + + result, err := CopyNode(ctx, existingNode, CopyNodeOpts{keepState: false}) + if err != nil { + return err + } + + /* + * Apply changes + */ + + // === Ports === + if result.Ports == nil { + result.Ports = nat.PortMap{} + } + for port, portbindings := range changeset.Ports { + loopChangesetPortbindings: + for _, portbinding := range portbindings { + + // loop over existing portbindings to avoid port collisions (docker doesn't check for it) + for _, existingPB := range result.Ports[port] { + if util.IsPortBindingEqual(portbinding, existingPB) { // also matches on "equal" HostIPs (127.0.0.1, "", 0.0.0.0) + log.Tracef("Skipping existing PortBinding: %+v", existingPB) + continue loopChangesetPortbindings + } + } + log.Tracef("Adding portbinding %+v for port %s", portbinding, port.Port()) + result.Ports[port] = append(result.Ports[port], portbinding) + } + } + + // --- Loadbalancer specifics --- + if result.Role == k3d.LoadBalancerRole { + nodeEditApplyLBSpecifics(ctx, result) + } + + // replace existing node + return NodeReplace(ctx, runtime, existingNode, result) + +} + +func nodeEditApplyLBSpecifics(ctx context.Context, lbNode *k3d.Node) { + tcp_ports := []string{} + udp_ports := []string{} + for index, env := range lbNode.Env { + if strings.HasPrefix(env, "PORTS=") || strings.HasPrefix(env, "UDP_PORTS=") { + // Remove matching environment variable from slice (does not preserve order) + lbNode.Env[index] = lbNode.Env[len(lbNode.Env)-1] // copy last element to index of matching env + lbNode.Env[len(lbNode.Env)-1] = "" // remove last element + lbNode.Env = lbNode.Env[:len(lbNode.Env)-1] // truncate + } + } + + for port := range lbNode.Ports { + switch port.Proto() { + case "tcp": + tcp_ports = append(tcp_ports, port.Port()) + break + case "udp": + 
udp_ports = append(udp_ports, port.Port()) + break + default: + log.Warnf("Unknown port protocol %s for port %s", port.Proto(), port.Port()) + } + } + lbNode.Env = append(lbNode.Env, fmt.Sprintf("PORTS=%s", strings.Join(tcp_ports, ","))) + lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) +} + +func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.Node) error { + + // rename existing node + oldNameTemp := fmt.Sprintf("%s-%s", old.Name, util.GenerateRandomString(5)) + oldNameOriginal := old.Name + log.Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp) + if err := runtime.RenameNode(ctx, old, oldNameTemp); err != nil { + return err + } + old.Name = oldNameTemp + + // create (not start) new node + log.Infof("Creating new node %s...", new.Name) + if err := NodeCreate(ctx, runtime, new, k3d.NodeCreateOpts{Wait: true}); err != nil { + if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil { + return fmt.Errorf("Failed to create new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err) + } + return fmt.Errorf("Failed to create new node. Brought back old node: %+v", err) + } + + // stop existing/old node + log.Infof("Stopping existing node %s...", old.Name) + if err := runtime.StopNode(ctx, old); err != nil { + return err + } + + // start new node + log.Infof("Starting new node %s...", new.Name) + if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { + if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { + return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err) + } + if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil { + return fmt.Errorf("Failed to start new node. 
Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err) + } + old.Name = oldNameOriginal + if err := NodeStart(ctx, runtime, old, k3d.NodeStartOpts{Wait: true}); err != nil { + return fmt.Errorf("Failed to start new node. Also failed to restart old node: %+v", err) + } + return fmt.Errorf("Failed to start new node. Rolled back: %+v", err) + } + + // cleanup: delete old node + log.Infof("Deleting old node %s...", old.Name) + if err := NodeDelete(ctx, runtime, old, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { + return err + } + + // done + return nil +} + +type CopyNodeOpts struct { + keepState bool +} + +func CopyNode(ctx context.Context, src *k3d.Node, opts CopyNodeOpts) (*k3d.Node, error) { + + targetCopy, err := copystruct.Copy(src) + if err != nil { + return nil, err + } + + result := targetCopy.(*k3d.Node) + + if !opts.keepState { + // ensure that node state is empty + result.State = k3d.NodeState{} + } + + return result, err +} diff --git a/pkg/runtimes/docker/node.go b/pkg/runtimes/docker/node.go index c7c3b37d..e8e1232c 100644 --- a/pkg/runtimes/docker/node.go +++ b/pkg/runtimes/docker/node.go @@ -447,3 +447,21 @@ func (d Docker) GetNodesInNetwork(ctx context.Context, network string) ([]*k3d.N return connectedNodes, nil } + +func (d Docker) RenameNode(ctx context.Context, node *k3d.Node, newName string) error { + // get the container for the given node + container, err := getNodeContainer(ctx, node) + if err != nil { + return err + } + + // create docker client + docker, err := GetDockerClient() + if err != nil { + log.Errorln("Failed to create docker client") + return err + } + defer docker.Close() + + return docker.ContainerRename(ctx, container.ID, newName) +} diff --git a/pkg/runtimes/docker/translate.go b/pkg/runtimes/docker/translate.go index c8331c75..564b0f6f 100644 --- a/pkg/runtimes/docker/translate.go +++ b/pkg/runtimes/docker/translate.go @@ -243,14 +243,6 @@ func TranslateContainerDetailsToNode(containerDetails 
types.ContainerJSON) (*k3d } } - // env vars: only copy K3S_* and K3D_* // FIXME: should we really do this? Might be unexpected, if user has e.g. HTTP_PROXY vars - env := []string{} - for _, envVar := range containerDetails.Config.Env { - if strings.HasPrefix(envVar, "K3D_") || strings.HasPrefix(envVar, "K3S_") { - env = append(env, envVar) - } - } - // labels: only copy k3d.* labels labels := map[string]string{} for k, v := range containerDetails.Config.Labels { @@ -277,7 +269,7 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]], Image: containerDetails.Image, Volumes: containerDetails.HostConfig.Binds, - Env: env, + Env: containerDetails.Config.Env, Cmd: containerDetails.Config.Cmd, Args: []string{}, // empty, since Cmd already contains flags Ports: containerDetails.HostConfig.PortBindings, diff --git a/pkg/runtimes/runtime.go b/pkg/runtimes/runtime.go index b27d87b1..d79cab77 100644 --- a/pkg/runtimes/runtime.go +++ b/pkg/runtimes/runtime.go @@ -52,6 +52,7 @@ type Runtime interface { GetHost() string CreateNode(context.Context, *k3d.Node) error DeleteNode(context.Context, *k3d.Node) error + RenameNode(context.Context, *k3d.Node, string) error GetNodesByLabel(context.Context, map[string]string) ([]*k3d.Node, error) GetNode(context.Context, *k3d.Node) (*k3d.Node, error) GetNodeStatus(context.Context, *k3d.Node) (bool, string, error) diff --git a/pkg/util/ports.go b/pkg/util/ports.go index acdc7322..4906ff57 100644 --- a/pkg/util/ports.go +++ b/pkg/util/ports.go @@ -25,6 +25,7 @@ package util import ( "net" + "github.com/docker/go-connections/nat" log "github.com/sirupsen/logrus" ) @@ -45,3 +46,21 @@ func GetFreePort() (int, error) { return tcpListener.Addr().(*net.TCPAddr).Port, nil } + +var equalHostIPs = map[string]interface{}{ + "": nil, + "127.0.0.1": nil, + "0.0.0.0": nil, + "localhost": nil, +} + +func IsPortBindingEqual(a, b nat.PortBinding) bool { + if 
a.HostPort == b.HostPort { + if _, ok := equalHostIPs[a.HostIP]; ok { + if _, ok := equalHostIPs[b.HostIP]; ok { + return true + } + } + } + return false +} diff --git a/tests/test_node_edit.sh b/tests/test_node_edit.sh new file mode 100755 index 00000000..b9a5de04 --- /dev/null +++ b/tests/test_node_edit.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } + +# shellcheck source=./common.sh +source "$CURR_DIR/common.sh" + +export CURRENT_STAGE="Test | NodeEdit" + +highlight "[START] NodeEdit" + +clustername="test-node-edit" + +existingPortMappingHostPort="1111" +existingPortMappingContainerPort="2222" +newPortMappingHostPort="3333" +newPortMappingContainerPort="4444" + +info "Creating cluster $clustername..." +$EXE cluster create $clustername --port "$existingPortMappingHostPort:$existingPortMappingContainerPort@loadbalancer" || failed "could not create cluster $clustername" + +info "Checking cluster access..." +check_clusters "$clustername" || failed "error checking cluster access" + +info "Adding port-mapping to loadbalancer..." +$EXE node edit k3d-$clustername-serverlb --port-add $existingPortMappingHostPort:$existingPortMappingContainerPort --port-add $newPortMappingHostPort:$newPortMappingContainerPort || failed "failed to add port-mapping to serverlb in $clustername" + +info "Checking port-mappings..." +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$existingPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$newPortMappingContainerPort" || failed "failed to verify new port-mapping" + +info "Checking cluster access..." 
+check_clusters "$clustername" || failed "error checking cluster access" + +info "Deleting cluster $clustername..." +$EXE cluster delete $clustername || failed "failed to delete the cluster $clustername" + +exit 0 diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 00000000..22985159 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 00000000..f0fbd2e5 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. 
+ +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 00000000..db6a6aa1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 00000000..8089e667 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,631 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +const tagKey = "copy" + +// Copy returns a deep copy of v. +// +// Copy is unable to copy unexported fields in a struct (lowercase field names). +// Unexported fields can't be reflected by the Go runtime and therefore +// copystructure can't perform any data copies. +// +// For structs, copy behavior can be controlled with struct tags. For example: +// +// struct { +// Name string +// Data *bytes.Buffer `copy:"shallow"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored, effectively resulting in it being +// assigned the zero value in the copy. +// +// * "shallow" - The field will be be shallow copied. 
This means that references +// values such as pointers, maps, slices, etc. will be directly assigned +// versus deep copied. +// +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// ShallowCopiers is a map of pointer types that behave specially +// when they are copied. If a type is found in this map while deep +// copying, the pointer value will be shallow copied and not walked +// into. +// +// The key should be the type, obtained using: reflect.TypeOf(value +// with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. 
+func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc + + // ShallowCopiers is a map of pointer types that when they are + // shallow copied no matter where they are encountered. Use the + // global ShallowCopiers if this is nil. + ShallowCopiers map[reflect.Type]struct{} +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + w.copiers = c.Copiers + + if c.ShallowCopiers == nil { + c.ShallowCopiers = ShallowCopiers + } + w.shallowCopiers = c.ShallowCopiers + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. 
+func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + copiers map[reflect.Type]CopierFunc + shallowCopiers map[reflect.Type]struct{} + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. + ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, 
SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. + if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. 
If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Pointer(v reflect.Value) error { + if _, ok := w.shallowCopiers[v.Type()]; ok { + // Shallow copy this value. Use the same logic as primitive, then + // return skip. + if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. 
Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := w.copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. 
+ if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. 
+ // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. 
copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod new file mode 100644 index 00000000..cd9c050c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.mod @@ -0,0 +1,5 @@ +module github.com/mitchellh/copystructure + +go 1.15 + +require github.com/mitchellh/reflectwalk v1.0.2 diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum new file mode 100644 index 00000000..3e38da1e --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 00000000..ac82cd2e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. 
diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod new file mode 100644 index 00000000..52bb7c46 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 00000000..6a7f1761 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 00000000..70760cf4 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. 
+ +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 00000000..7fee7b05 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,420 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. 
+type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. 
+func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. 
+ switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. + originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := 
sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/modules.txt 
b/vendor/modules.txt index 2e8a869d..54f233ab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -145,11 +145,16 @@ github.com/json-iterator/go github.com/liggitt/tabwriter # github.com/magiconair/properties v1.8.5 github.com/magiconair/properties +# github.com/mitchellh/copystructure v1.2.0 +## explicit +github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir # github.com/mitchellh/mapstructure v1.4.1 github.com/mitchellh/mapstructure +# github.com/mitchellh/reflectwalk v1.0.2 +github.com/mitchellh/reflectwalk # github.com/moby/sys/mount v0.2.0 ## explicit github.com/moby/sys/mount From bf2e81ca345d8b5ffc014bc66168df2fe9ffa3d8 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Thu, 17 Jun 2021 15:17:48 +0200 Subject: [PATCH 08/45] init enhanced templating for the nginx config --- proxy/Dockerfile | 1 + proxy/conf.d/nginx.toml | 6 ++--- proxy/nginx-proxy | 4 +++- proxy/templates/nginx.tmpl | 49 ++++++++++++++++---------------------- proxy/test/portmap.yaml | 7 ++++++ 5 files changed, 34 insertions(+), 33 deletions(-) create mode 100644 proxy/test/portmap.yaml diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 105e89c4..300c37a1 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -11,5 +11,6 @@ RUN echo "Building for '${OS}/${ARCH}'..." 
\ COPY templates /etc/confd/templates/ COPY conf.d /etc/confd/conf.d/ COPY nginx-proxy /usr/bin/ +COPY test/portmap.yaml /etc/confd/values.yaml ENTRYPOINT nginx-proxy \ No newline at end of file diff --git a/proxy/conf.d/nginx.toml b/proxy/conf.d/nginx.toml index 6586eb9c..26decd79 100644 --- a/proxy/conf.d/nginx.toml +++ b/proxy/conf.d/nginx.toml @@ -2,7 +2,5 @@ src = "nginx.tmpl" dest = "/etc/nginx/nginx.conf" keys = [ - "SERVERS", - "PORTS", - "UDP_PORTS", -] + "ports" +] \ No newline at end of file diff --git a/proxy/nginx-proxy b/proxy/nginx-proxy index 39722944..1a1cd53b 100755 --- a/proxy/nginx-proxy +++ b/proxy/nginx-proxy @@ -1,7 +1,9 @@ #!/bin/sh # Run confd -confd -onetime -backend env +set -e +confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug +set +e # Output Configuration echo "===== Initial nginx configuration =====" diff --git a/proxy/templates/nginx.tmpl b/proxy/templates/nginx.tmpl index 43e4a286..963b61dd 100644 --- a/proxy/templates/nginx.tmpl +++ b/proxy/templates/nginx.tmpl @@ -1,3 +1,10 @@ +################################### +# Generated by confd {{datetime}} # +# ####### # +# # k3d # # +# ####### # +################################### + {{- $servers := split (getenv "SERVERS") "," -}} {{- $ports := split (getenv "PORTS") "," -}} {{- $udp_ports := split (getenv "UDP_PORTS") "," -}} @@ -12,42 +19,28 @@ events { stream { - ####### - # TCP # - ####### - {{- range $port := $ports }} - upstream server_nodes_{{ $port }} { - {{- range $server := $servers }} + {{- range $portstring := lsdir "/ports" }} + + + {{- $portdir := printf "/ports/%s/*" $portstring -}} + {{- $port := index (split $portstring ".") 0 -}} + {{- $protocol := index (split $portstring ".") 1 -}} + {{- $upstream := replace $portstring "." 
"_" -1 }} + + upstream {{ $upstream }} { + {{- range $server := getvs $portdir }} server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s; {{- end }} } server { - listen {{ $port }}; - proxy_pass server_nodes_{{ $port }}; + listen {{ $port }} {{- if (eq $protocol "udp") }} udp{{- end -}}; + proxy_pass {{ $upstream }}; proxy_timeout 600; proxy_connect_timeout 2s; } + + {{- end }} - ####### - # UDP # - ####### - - {{- range $port := $udp_ports }} - {{- if $port }} - upstream server_nodes_udp_{{ $port }} { - {{- range $server := $servers }} - server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s; - {{- end }} - } - - server { - listen {{ $port }} udp; - proxy_pass server_nodes_udp_{{ $port }}; - proxy_timeout 600; - proxy_connect_timeout 2s; - } - {{- end }} - {{- end }} } diff --git a/proxy/test/portmap.yaml b/proxy/test/portmap.yaml new file mode 100644 index 00000000..02df30c3 --- /dev/null +++ b/proxy/test/portmap.yaml @@ -0,0 +1,7 @@ +ports: + 1234.tcp: + - server-0 + - server-1 + 4321.udp: + - agent-0 + - agent-1 \ No newline at end of file From de401c6db374e86ea578f03b2fc415475e314e03 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Thu, 17 Jun 2021 16:18:35 +0200 Subject: [PATCH 09/45] add debug functionality to get current loadbalancer config --- cmd/debug/debug.go | 91 ++++++++++++++++++++++++++++++++++++++ cmd/root.go | 2 + pkg/client/loadbalancer.go | 39 ++++++++++++++++ pkg/types/types.go | 20 +++++++++ 4 files changed, 152 insertions(+) create mode 100644 cmd/debug/debug.go diff --git a/cmd/debug/debug.go b/cmd/debug/debug.go new file mode 100644 index 00000000..c524790b --- /dev/null +++ b/cmd/debug/debug.go @@ -0,0 +1,91 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package debug + +import ( + "fmt" + + "github.com/rancher/k3d/v4/pkg/client" + "github.com/rancher/k3d/v4/pkg/runtimes" + "github.com/rancher/k3d/v4/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" +) + +// NewCmdDebug returns a new cobra command +func NewCmdDebug() *cobra.Command { + cmd := &cobra.Command{ + Use: "debug", + Hidden: true, + Short: "Debug k3d cluster(s)", + Long: `Debug k3d cluster(s)`, + Run: func(cmd *cobra.Command, args []string) { + if err := cmd.Help(); err != nil { + log.Errorln("Couldn't get help text") + log.Fatalln(err) + } + }, + } + + cmd.AddCommand(NewCmdDebugLoadbalancer()) + + return cmd +} + +func NewCmdDebugLoadbalancer() *cobra.Command { + cmd := &cobra.Command{ + Use: "loadbalancer", + Aliases: []string{"lb"}, + Short: "Debug the loadbalancer", + Long: `Debug the loadbalancer`, + Run: func(cmd *cobra.Command, args []string) { + if err := cmd.Help(); err != nil { + log.Errorln("Couldn't get help text") + log.Fatalln(err) + } + }, + } + + cmd.AddCommand(&cobra.Command{ + Use: "get-config", + Args: cobra.ExactArgs(1), // cluster name + Run: func(cmd *cobra.Command, args []string) { + c, err := client.ClusterGet(cmd.Context(), 
runtimes.SelectedRuntime, &types.Cluster{Name: args[0]}) + if err != nil { + log.Fatalln(err) + } + + lbconf, err := client.GetLoadbalancerConfig(cmd.Context(), runtimes.SelectedRuntime, c) + if err != nil { + log.Fatalln(err) + } + yamlized, err := yaml.Marshal(lbconf) + if err != nil { + log.Fatalln(err) + } + fmt.Println(string(yamlized)) + }, + }) + + return cmd +} diff --git a/cmd/root.go b/cmd/root.go index 418e4008..8eb9c9d5 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -34,6 +34,7 @@ import ( "github.com/rancher/k3d/v4/cmd/cluster" cfg "github.com/rancher/k3d/v4/cmd/config" + "github.com/rancher/k3d/v4/cmd/debug" "github.com/rancher/k3d/v4/cmd/image" "github.com/rancher/k3d/v4/cmd/kubeconfig" "github.com/rancher/k3d/v4/cmd/node" @@ -116,6 +117,7 @@ func init() { rootCmd.AddCommand(image.NewCmdImage()) rootCmd.AddCommand(cfg.NewCmdConfig()) rootCmd.AddCommand(registry.NewCmdRegistry()) + rootCmd.AddCommand(debug.NewCmdDebug()) rootCmd.AddCommand(&cobra.Command{ Use: "version", diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index f0fa74fc..489a4ef1 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -22,13 +22,17 @@ THE SOFTWARE. 
package client import ( + "bytes" "context" "fmt" + "io/ioutil" "strings" "github.com/rancher/k3d/v4/pkg/runtimes" + "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" log "github.com/sirupsen/logrus" + "sigs.k8s.io/yaml" ) // UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster @@ -70,3 +74,38 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return nil } + +func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*types.LoadbalancerConfig, error) { + + if cluster.ServerLoadBalancer == nil { + for _, node := range cluster.Nodes { + if node.Role == types.LoadBalancerRole { + var err error + cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node) + if err != nil { + return nil, err + } + } + } + } + + reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer) + if err != nil { + return &k3d.LoadbalancerConfig{}, err + } + defer reader.Close() + + file, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + file = bytes.Trim(file[512:], "\x00") // trim control characters, etc. 
+ + currentConfig := &types.LoadbalancerConfig{} + if err := yaml.Unmarshal(file, currentConfig); err != nil { + return nil, err + } + + return currentConfig, nil +} diff --git a/pkg/types/types.go b/pkg/types/types.go index 4df5e434..674cf576 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -427,3 +427,23 @@ type RegistryExternal struct { Host string `yaml:"host" json:"host"` Port string `yaml:"port" json:"port"` } + +/* + * Loadbalancer + */ + +/* LoadbalancerConfig defines the coarse file structure to configure the k3d-proxy + * Example: + * ports: + * 1234.tcp: + * - k3d-k3s-default-server-0 + * - k3d-k3s-default-server-1 + * 4321.udp: + * - k3d-k3s-default-agent-0 + * - k3d-k3s-default-agent-1 + */ +type LoadbalancerConfig struct { + Ports map[string][]string `yaml:"ports"` +} + +const DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" From 21244172c9faf6a27684db504fc7da14cd44a8bc Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 11:04:39 +0200 Subject: [PATCH 10/45] move loadbalancer creation to separate function --- pkg/client/cluster.go | 54 +--------------------------------- pkg/client/loadbalancer.go | 59 ++++++++++++++++++++++++++++++++++++++ pkg/types/types.go | 4 +++ 3 files changed, 64 insertions(+), 53 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index e04484c1..6fdf2e7a 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -28,7 +28,6 @@ import ( "fmt" "sort" "strconv" - "strings" "time" gort "runtime" @@ -44,7 +43,6 @@ import ( k3d "github.com/rancher/k3d/v4/pkg/types" "github.com/rancher/k3d/v4/pkg/types/k3s" "github.com/rancher/k3d/v4/pkg/util" - "github.com/rancher/k3d/v4/version" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) @@ -509,59 +507,9 @@ ClusterCreatOpts: */ // *** ServerLoadBalancer *** if !clusterCreateOpts.DisableLoadBalancer { - // Generate a comma-separated list of server/server names to pass to the LB container - servers := "" - for _, node := range 
cluster.Nodes { - if node.Role == k3d.ServerRole { - if servers == "" { - servers = node.Name - } else { - servers = fmt.Sprintf("%s,%s", servers, node.Name) - } - } - } - - // generate comma-separated list of extra ports to forward - ports := []string{k3d.DefaultAPIPort} - var udp_ports []string - for exposedPort := range cluster.ServerLoadBalancer.Ports { - if exposedPort.Proto() == "udp" { - udp_ports = append(udp_ports, exposedPort.Port()) - continue - } - ports = append(ports, exposedPort.Port()) - } - - if cluster.ServerLoadBalancer.Ports == nil { - cluster.ServerLoadBalancer.Ports = nat.PortMap{} - } - cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} - - // Create LB as a modified node with loadbalancerRole - lbNode := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), - Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()), - Ports: cluster.ServerLoadBalancer.Ports, - Env: []string{ - fmt.Sprintf("SERVERS=%s", servers), - fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), - fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), - }, - Role: k3d.LoadBalancerRole, - RuntimeLabels: clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels - Networks: []string{cluster.Network.Name}, - Restart: true, - } - if len(udp_ports) > 0 { - lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) - } - cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback - log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - if err := NodeCreate(clusterCreateCtx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { - log.Errorln("Failed to create loadbalancer") + if err := LoadbalancerCreate(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}); err != nil { return err } - log.Debugf("Created 
loadbalancer '%s'", lbNode.Name) } return nil diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 489a4ef1..f76440f3 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -28,9 +28,11 @@ import ( "io/ioutil" "strings" + "github.com/docker/go-connections/nat" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" + "github.com/rancher/k3d/v4/version" log "github.com/sirupsen/logrus" "sigs.k8s.io/yaml" ) @@ -109,3 +111,60 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste return currentConfig, nil } + +func LoadbalancerCreate(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) error { + // Generate a comma-separated list of server/server names to pass to the LB container + servers := "" + for _, node := range cluster.Nodes { + if node.Role == k3d.ServerRole { + if servers == "" { + servers = node.Name + } else { + servers = fmt.Sprintf("%s,%s", servers, node.Name) + } + } + } + + // generate comma-separated list of extra ports to forward + ports := []string{k3d.DefaultAPIPort} + var udp_ports []string + for exposedPort := range cluster.ServerLoadBalancer.Ports { + if exposedPort.Proto() == "udp" { + udp_ports = append(udp_ports, exposedPort.Port()) + continue + } + ports = append(ports, exposedPort.Port()) + } + + if cluster.ServerLoadBalancer.Ports == nil { + cluster.ServerLoadBalancer.Ports = nat.PortMap{} + } + cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} + + // Create LB as a modified node with loadbalancerRole + lbNode := &k3d.Node{ + Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), + Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()), + Ports: cluster.ServerLoadBalancer.Ports, + Env: []string{ + fmt.Sprintf("SERVERS=%s", servers), + 
fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), + fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), + }, + Role: k3d.LoadBalancerRole, + RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels + Networks: []string{cluster.Network.Name}, + Restart: true, + } + if len(udp_ports) > 0 { + lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) + } + cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback + log.Infof("Creating LoadBalancer '%s'", lbNode.Name) + if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { + log.Errorln("Failed to create loadbalancer") + return err + } + log.Debugf("Created loadbalancer '%s'", lbNode.Name) + return nil +} diff --git a/pkg/types/types.go b/pkg/types/types.go index 674cf576..621f2544 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -447,3 +447,7 @@ type LoadbalancerConfig struct { } const DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" + +type LoadbalancerCreateOpts struct { + Labels map[string]string +} From a3dd47029d9da45b6533128d32970fbd0a7be283 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 12:57:27 +0200 Subject: [PATCH 11/45] generate new config file for lb and add helper functions to get images from env if needed --- pkg/client/cluster.go | 9 ++++- pkg/client/loadbalancer.go | 74 +++++++++++++++++++++----------------- pkg/types/images.go | 48 +++++++++++++++++++++++++ pkg/types/types.go | 12 +++++-- proxy/conf.d/nginx.toml | 3 +- proxy/templates/nginx.tmpl | 5 +-- proxy/test/portmap.yaml | 5 ++- 7 files changed, 114 insertions(+), 42 deletions(-) create mode 100644 pkg/types/images.go diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 6fdf2e7a..4905f692 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -507,9 +507,16 @@ ClusterCreatOpts: */ // *** ServerLoadBalancer *** if 
!clusterCreateOpts.DisableLoadBalancer { - if err := LoadbalancerCreate(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}); err != nil { + node, nodeCreateOpts, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) + if err != nil { return err } + if err := NodeCreate(ctx, runtime, node, *nodeCreateOpts); err != nil { + log.Errorln("Failed to create loadbalancer") + return err + } + log.Debugf("Created loadbalancer '%s'", node.Name) + return err } return nil diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index f76440f3..db47c494 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -29,10 +29,10 @@ import ( "strings" "github.com/docker/go-connections/nat" + "github.com/rancher/k3d/v4/pkg/actions" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" - "github.com/rancher/k3d/v4/version" log "github.com/sirupsen/logrus" "sigs.k8s.io/yaml" ) @@ -112,28 +112,28 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste return currentConfig, nil } -func LoadbalancerCreate(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) error { - // Generate a comma-separated list of server/server names to pass to the LB container - servers := "" +func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, *k3d.NodeCreateOpts, error) { + + lbConfig := k3d.LoadbalancerConfig{ + Ports: map[string][]string{}, + Settings: k3d.LoadBalancerSettings{}, + } + + // get list of server nodes + servers := []string{} for _, node := range cluster.Nodes { if node.Role == k3d.ServerRole { - if servers == "" { - servers = node.Name - } else { - servers = fmt.Sprintf("%s,%s", servers, node.Name) - } + servers = append(servers, 
node.Name) } } - // generate comma-separated list of extra ports to forward - ports := []string{k3d.DefaultAPIPort} - var udp_ports []string + // Default API Port proxied to the server nodes + lbConfig.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = servers + + // generate comma-separated list of extra ports to forward // TODO: no default targets? for exposedPort := range cluster.ServerLoadBalancer.Ports { - if exposedPort.Proto() == "udp" { - udp_ports = append(udp_ports, exposedPort.Port()) - continue - } - ports = append(ports, exposedPort.Port()) + // TODO: catch duplicates here? + lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers } if cluster.ServerLoadBalancer.Ports == nil { @@ -143,28 +143,36 @@ func LoadbalancerCreate(ctx context.Context, runtime runtimes.Runtime, cluster * // Create LB as a modified node with loadbalancerRole lbNode := &k3d.Node{ - Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), - Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()), - Ports: cluster.ServerLoadBalancer.Ports, - Env: []string{ - fmt.Sprintf("SERVERS=%s", servers), - fmt.Sprintf("PORTS=%s", strings.Join(ports, ",")), - fmt.Sprintf("WORKER_PROCESSES=%d", len(ports)), - }, + Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), + Image: k3d.GetLoadbalancerImage(), + Ports: cluster.ServerLoadBalancer.Ports, Role: k3d.LoadBalancerRole, RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels Networks: []string{cluster.Network.Name}, Restart: true, } - if len(udp_ports) > 0 { - lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) - } cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != 
nil { - log.Errorln("Failed to create loadbalancer") - return err + + // some additional nginx settings + lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) + + // prepare to write config to lb container + configyaml, err := yaml.Marshal(lbConfig) + if err != nil { + return nil, nil, err } - log.Debugf("Created loadbalancer '%s'", lbNode.Name) - return nil + + writeLbConfigAction := k3d.NodeHook{ + Stage: k3d.LifecycleStagePreStart, + Action: actions.WriteFileAction{ + Runtime: runtime, + Dest: k3d.DefaultLoadbalancerConfigPath, + Mode: 0744, + Content: configyaml, + }, + } + + return lbNode, &k3d.NodeCreateOpts{NodeHooks: []k3d.NodeHook{writeLbConfigAction}}, nil + } diff --git a/pkg/types/images.go b/pkg/types/images.go new file mode 100644 index 00000000..713ab0a7 --- /dev/null +++ b/pkg/types/images.go @@ -0,0 +1,48 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ +package types + +import ( + "fmt" + "os" + + "github.com/rancher/k3d/v4/version" + log "github.com/sirupsen/logrus" +) + +func GetLoadbalancerImage() string { + if img := os.Getenv("K3D_IMAGE_LOADBALANCER"); img != "" { + log.Infof("Loadbalancer image set from env var $K3D_IMAGE_LOADBALANCER: %s", img) + return img + } + + return fmt.Sprintf("%s:%s", DefaultLBImageRepo, version.GetHelperImageVersion()) +} + +func GetToolsImage() string { + if img := os.Getenv("K3D_IMAGE_TOOLS"); img != "" { + log.Infof("Tools image set from env var $K3D_IMAGE_TOOLS: %s", img) + return img + } + + return fmt.Sprintf("%s:%s", DefaultToolsImageRepo, version.GetHelperImageVersion()) +} diff --git a/pkg/types/types.go b/pkg/types/types.go index 621f2544..d6a83e6e 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -443,10 +443,18 @@ type RegistryExternal struct { * - k3d-k3s-default-agent-1 */ type LoadbalancerConfig struct { - Ports map[string][]string `yaml:"ports"` + Ports map[string][]string `yaml:"ports"` + Settings LoadBalancerSettings `yaml:"settings"` } -const DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" +type LoadBalancerSettings struct { + WorkerProcesses int `yaml:"workerProcesses"` +} + +const ( + DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" + DefaultLoadbalancerWorkerProcesses = 1024 +) type LoadbalancerCreateOpts struct { Labels map[string]string diff --git a/proxy/conf.d/nginx.toml b/proxy/conf.d/nginx.toml index 26decd79..96805409 100644 --- a/proxy/conf.d/nginx.toml +++ b/proxy/conf.d/nginx.toml @@ -2,5 +2,6 @@ src = "nginx.tmpl" dest = "/etc/nginx/nginx.conf" keys = [ - "ports" + "ports", + "settings" ] \ No newline at end of file diff --git a/proxy/templates/nginx.tmpl b/proxy/templates/nginx.tmpl index 963b61dd..fa8c3087 100644 --- a/proxy/templates/nginx.tmpl +++ b/proxy/templates/nginx.tmpl @@ -5,16 +5,13 @@ # ####### # ################################### -{{- $servers := split (getenv "SERVERS") "," -}} -{{- $ports := split 
(getenv "PORTS") "," -}} -{{- $udp_ports := split (getenv "UDP_PORTS") "," -}} error_log stderr notice; worker_processes auto; events { multi_accept on; use epoll; - worker_connections {{ add 1024 (len $ports) }}; + worker_connections {{ getv "/settings/workerProcesses" }}; } stream { diff --git a/proxy/test/portmap.yaml b/proxy/test/portmap.yaml index 02df30c3..d102645b 100644 --- a/proxy/test/portmap.yaml +++ b/proxy/test/portmap.yaml @@ -4,4 +4,7 @@ ports: - server-1 4321.udp: - agent-0 - - agent-1 \ No newline at end of file + - agent-1 + +settings: + workerProcesses: 1030 \ No newline at end of file From ff76512e63c5f4ce0bdf0abf24bf0c46b6ca5956 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 12:59:17 +0200 Subject: [PATCH 12/45] move defaults for images to new file --- pkg/types/images.go | 15 +++++++++++++++ pkg/types/types.go | 15 --------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pkg/types/images.go b/pkg/types/images.go index 713ab0a7..d6d6c7a9 100644 --- a/pkg/types/images.go +++ b/pkg/types/images.go @@ -29,6 +29,21 @@ import ( log "github.com/sirupsen/logrus" ) +// DefaultK3sImageRepo specifies the default image repository for the used k3s image +const DefaultK3sImageRepo = "docker.io/rancher/k3s" + +// DefaultLBImageRepo defines the default cluster load balancer image +const DefaultLBImageRepo = "docker.io/rancher/k3d-proxy" + +// DefaultToolsImageRepo defines the default image used for the tools container +const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools" + +// DefaultRegistryImageRepo defines the default image used for the k3d-managed registry +const DefaultRegistryImageRepo = "docker.io/library/registry" + +// DefaultRegistryImageTag defines the default image tag used for the k3d-managed registry +const DefaultRegistryImageTag = "2" + func GetLoadbalancerImage() string { if img := os.Getenv("K3D_IMAGE_LOADBALANCER"); img != "" { log.Infof("Loadbalancer image set from env var 
$K3D_IMAGE_LOADBALANCER: %s", img) diff --git a/pkg/types/types.go b/pkg/types/types.go index d6a83e6e..d47e1cd7 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -41,21 +41,6 @@ const DefaultClusterName = "k3s-default" // ... and still stay within the 64 character limit (e.g. of docker) const DefaultClusterNameMaxLength = 32 -// DefaultK3sImageRepo specifies the default image repository for the used k3s image -const DefaultK3sImageRepo = "docker.io/rancher/k3s" - -// DefaultLBImageRepo defines the default cluster load balancer image -const DefaultLBImageRepo = "docker.io/rancher/k3d-proxy" - -// DefaultToolsImageRepo defines the default image used for the tools container -const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools" - -// DefaultRegistryImageRepo defines the default image used for the k3d-managed registry -const DefaultRegistryImageRepo = "docker.io/library/registry" - -// DefaultRegistryImageTag defines the default image tag used for the k3d-managed registry -const DefaultRegistryImageTag = "2" - // DefaultObjectNamePrefix defines the name prefix for every object created by k3d const DefaultObjectNamePrefix = "k3d" From 756a7d7b6ef687fc61a537a59a59e15bb334b97f Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 13:20:07 +0200 Subject: [PATCH 13/45] proxy: add Makefile and remove test file from Dockerfile --- proxy/Dockerfile | 1 - proxy/Makefile | 5 +++++ 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 proxy/Makefile diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 300c37a1..105e89c4 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -11,6 +11,5 @@ RUN echo "Building for '${OS}/${ARCH}'..." 
\ COPY templates /etc/confd/templates/ COPY conf.d /etc/confd/conf.d/ COPY nginx-proxy /usr/bin/ -COPY test/portmap.yaml /etc/confd/values.yaml ENTRYPOINT nginx-proxy \ No newline at end of file diff --git a/proxy/Makefile b/proxy/Makefile new file mode 100644 index 00000000..6555f6ae --- /dev/null +++ b/proxy/Makefile @@ -0,0 +1,5 @@ +.PHONY: test + +test: + docker build . -t rancher/k3d-proxy:dev + docker run --rm -v $(shell pwd)/test/portmap.yaml:/etc/confd/values.yaml rancher/k3d-proxy:dev \ No newline at end of file From 44a79c46702097c22b72c08e152304b430a90ae1 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 13:20:58 +0200 Subject: [PATCH 14/45] types/node: add nodehooks to node struct to have them around at any stage (TO-DO: use this everywhere -> breaking change) --- pkg/client/cluster.go | 37 ++++++++++++++++++++++++++++++++----- pkg/client/loadbalancer.go | 35 ++++++++++------------------------- pkg/types/types.go | 3 ++- 3 files changed, 44 insertions(+), 31 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 4905f692..27225ed9 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -507,15 +507,40 @@ ClusterCreatOpts: */ // *** ServerLoadBalancer *** if !clusterCreateOpts.DisableLoadBalancer { - node, nodeCreateOpts, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) + lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) if err != nil { return err } - if err := NodeCreate(ctx, runtime, node, *nodeCreateOpts); err != nil { - log.Errorln("Failed to create loadbalancer") + cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback + + lbConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating loadbalancer config: %v", err) + } + + // prepare to 
write config to lb container + configyaml, err := yaml.Marshal(lbConfig) + if err != nil { return err } - log.Debugf("Created loadbalancer '%s'", node.Name) + + writeLbConfigAction := k3d.NodeHook{ + Stage: k3d.LifecycleStagePreStart, + Action: actions.WriteFileAction{ + Runtime: runtime, + Dest: k3d.DefaultLoadbalancerConfigPath, + Mode: 0744, + Content: configyaml, + }, + } + + lbNode.HookActions = append(lbNode.HookActions, writeLbConfigAction) + + log.Infof("Creating LoadBalancer '%s'", lbNode.Name) + if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { + return fmt.Errorf("error creating loadbalancer: %v", err) + } + log.Debugf("Created loadbalancer '%s'", lbNode.Name) return err } @@ -865,7 +890,9 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust log.Infoln("Starting helpers...") failedHelpers := 0 for _, helperNode := range aux { - nodeStartOpts := k3d.NodeStartOpts{} + nodeStartOpts := k3d.NodeStartOpts{ + NodeHooks: helperNode.HookActions, + } if helperNode.Role == k3d.LoadBalancerRole { nodeStartOpts.Wait = true } diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index db47c494..2a1debcc 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -29,7 +29,6 @@ import ( "strings" "github.com/docker/go-connections/nat" - "github.com/rancher/k3d/v4/pkg/actions" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -112,8 +111,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste return currentConfig, nil } -func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, *k3d.NodeCreateOpts, error) { - +func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, error) { lbConfig := k3d.LoadbalancerConfig{ Ports: map[string][]string{}, Settings: 
k3d.LoadBalancerSettings{}, @@ -136,6 +134,14 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers } + // some additional nginx settings + lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) + + return lbConfig, nil +} + +func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) { + if cluster.ServerLoadBalancer.Ports == nil { cluster.ServerLoadBalancer.Ports = nat.PortMap{} } @@ -151,28 +157,7 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster Networks: []string{cluster.Network.Name}, Restart: true, } - cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback - log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - // some additional nginx settings - lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) - - // prepare to write config to lb container - configyaml, err := yaml.Marshal(lbConfig) - if err != nil { - return nil, nil, err - } - - writeLbConfigAction := k3d.NodeHook{ - Stage: k3d.LifecycleStagePreStart, - Action: actions.WriteFileAction{ - Runtime: runtime, - Dest: k3d.DefaultLoadbalancerConfigPath, - Mode: 0744, - Content: configyaml, - }, - } - - return lbNode, &k3d.NodeCreateOpts{NodeHooks: []k3d.NodeHook{writeLbConfigAction}}, nil + return lbNode, nil } diff --git a/pkg/types/types.go b/pkg/types/types.go index d47e1cd7..296d1a84 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -334,6 +334,7 @@ type Node struct { Memory string // filled automatically State NodeState // filled automatically IP NodeIP // filled automatically + HookActions []NodeHook `yaml:"hooks" json:"hooks,omitempty"` 
} // ServerOpts describes some additional server role specific opts @@ -437,7 +438,7 @@ type LoadBalancerSettings struct { } const ( - DefaultLoadbalancerConfigPath = "/etc/confd/portmap.yaml" + DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml" DefaultLoadbalancerWorkerProcesses = 1024 ) From 2d52949859f2897298b126e22c671df7b42b95c2 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 13:44:46 +0200 Subject: [PATCH 15/45] tests/e2e: ensure that we use a cluster create timeout everywhere --- tests/test_ipam.sh | 2 +- tests/test_memory_limits.sh | 2 +- tests/test_node_edit.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_ipam.sh b/tests/test_ipam.sh index 1d5e56d2..194c7822 100755 --- a/tests/test_ipam.sh +++ b/tests/test_ipam.sh @@ -18,7 +18,7 @@ expectedIPServer0="$expectedIPLabelServer0/16" # k3d excludes the subnet_start ( expectedIPServerLB="172.45.0.3/16" info "Creating cluster $clustername..." -$EXE cluster create $clustername --subnet $subnet || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --subnet $subnet || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." check_clusters "$clustername" || failed "error checking cluster" diff --git a/tests/test_memory_limits.sh b/tests/test_memory_limits.sh index a5b81501..934a0f38 100755 --- a/tests/test_memory_limits.sh +++ b/tests/test_memory_limits.sh @@ -13,7 +13,7 @@ highlight "[START] MemoryLimitTest $EXTRA_TITLE" clustername="memlimittest" info "Creating cluster $clustername..." -$EXE cluster create $clustername --servers-memory 1g --agents 1 --agents-memory 1.5g || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --servers-memory 1g --agents 1 --agents-memory 1.5g || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." 
check_clusters "$clustername" || failed "error checking cluster" diff --git a/tests/test_node_edit.sh b/tests/test_node_edit.sh index b9a5de04..80f896d3 100755 --- a/tests/test_node_edit.sh +++ b/tests/test_node_edit.sh @@ -18,7 +18,7 @@ newPortMappingHostPort="3333" newPortMappingContainerPort="4444" info "Creating cluster $clustername..." -$EXE cluster create $clustername --port "$existingPortMappingHostPort:$existingPortMappingContainerPort@loadbalancer" || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --port "$existingPortMappingHostPort:$existingPortMappingContainerPort@loadbalancer" || failed "could not create cluster $clustername" info "Checking cluster access..." check_clusters "$clustername" || failed "error checking cluster access" From 2daf3872ca9ed5db67f31567798f221a1dbdb88f Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Fri, 18 Jun 2021 14:28:41 +0200 Subject: [PATCH 16/45] nodeAdd: do not copy ports from existing node (+ some cleanup) --- pkg/client/node.go | 86 ++++++++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/pkg/client/node.go b/pkg/client/node.go index 019a901b..f981d304 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -70,36 +70,75 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N node.Env = []string{} // copy labels and env vars from a similar node in the selected cluster - var chosenNode *k3d.Node + var srcNode *k3d.Node for _, existingNode := range cluster.Nodes { if existingNode.Role == node.Role { - chosenNode = existingNode + srcNode = existingNode break } } // if we didn't find a node with the same role in the cluster, just choose any other node - if chosenNode == nil { + if srcNode == nil { log.Debugf("Didn't find node with role '%s' in cluster '%s'. 
Choosing any other node (and using defaults)...", node.Role, cluster.Name) node.Cmd = k3d.DefaultRoleCmds[node.Role] for _, existingNode := range cluster.Nodes { if existingNode.Role != k3d.LoadBalancerRole { // any role except for the LoadBalancer role - chosenNode = existingNode + srcNode = existingNode break } } } // get node details - chosenNode, err = NodeGet(ctx, runtime, chosenNode) + srcNode, err = NodeGet(ctx, runtime, srcNode) if err != nil { return err } - log.Debugf("Adding node %+v \n>>> to cluster %+v\n>>> based on existing node %+v", node, cluster, chosenNode) + /* + * Sanitize Source Node + * -> remove fields that are not safe to copy as they break something down the stream + */ + + // TODO: I guess proper deduplication can be handled in a cleaner/better way or at the infofaker level at some point + for _, forbiddenMount := range util.DoNotCopyVolumeSuffices { + for i, mount := range node.Volumes { + if strings.Contains(mount, forbiddenMount) { + log.Tracef("Dropping copied volume mount %s to avoid issues...", mount) + node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i) + } + } + } + + // drop port mappings as we cannot use the same port mapping for a two nodes (port collisions) + srcNode.Ports = nat.PortMap{} + + // we cannot have two servers as init servers + if node.Role == k3d.ServerRole { + for _, forbiddenCmd := range k3d.DoNotCopyServerFlags { + for i, cmd := range srcNode.Cmd { + // cut out the '--cluster-init' flag as this should only be done by the initializing server node + if cmd == forbiddenCmd { + log.Tracef("Dropping '%s' from source node's cmd", forbiddenCmd) + srcNode.Cmd = append(srcNode.Cmd[:i], srcNode.Cmd[i+1:]...) + } + } + for i, arg := range node.Args { + // cut out the '--cluster-init' flag as this should only be done by the initializing server node + if arg == forbiddenCmd { + log.Tracef("Dropping '%s' from source node's args", forbiddenCmd) + srcNode.Args = append(srcNode.Args[:i], srcNode.Args[i+1:]...) 
+ } + } + } + } + + log.Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name) + log.Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node) // fetch registry config registryConfigBytes := []byte{} - registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, chosenNode) + registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, srcNode) if err != nil { if !errors.Is(err, runtimeErrors.ErrRuntimeFileNotFound) { log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err) @@ -117,22 +156,12 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N } // merge node config of new node into existing node config - if err := mergo.MergeWithOverwrite(chosenNode, *node); err != nil { + if err := mergo.MergeWithOverwrite(srcNode, *node); err != nil { log.Errorln("Failed to merge new node config into existing node config") return err } - node = chosenNode - - // TODO: I guess proper deduplication can be handled in a cleaner/better way or at the infofaker level at some point - for _, forbiddenMount := range util.DoNotCopyVolumeSuffices { - for i, mount := range node.Volumes { - if strings.Contains(mount, forbiddenMount) { - log.Tracef("Dropping copied volume mount %s to avoid issues...", mount) - node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i) - } - } - } + node = srcNode log.Debugf("Resulting node %+v", node) @@ -151,25 +180,6 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N } } - if node.Role == k3d.ServerRole { - for _, forbiddenCmd := range k3d.DoNotCopyServerFlags { - for i, cmd := range node.Cmd { - // cut out the '--cluster-init' flag as this should only be done by the initializing server node - if cmd == forbiddenCmd { - log.Debugf("Dropping '%s' from node's cmd", forbiddenCmd) - node.Cmd = append(node.Cmd[:i], node.Cmd[i+1:]...) 
- } - } - for i, arg := range node.Args { - // cut out the '--cluster-init' flag as this should only be done by the initializing server node - if arg == forbiddenCmd { - log.Debugf("Dropping '%s' from node's args", forbiddenCmd) - node.Args = append(node.Args[:i], node.Args[i+1:]...) - } - } - } - } - // add node actions if len(registryConfigBytes) != 0 { if createNodeOpts.NodeHooks == nil { From 339187b4da4b2c62f262b39c94ccbd7eeb45c282 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 18:53:43 +0200 Subject: [PATCH 17/45] adapt updating the loadbalancer config when adding a new node --- pkg/client/loadbalancer.go | 61 ++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 2a1debcc..0ed93bd1 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -29,11 +29,12 @@ import ( "strings" "github.com/docker/go-connections/nat" + "github.com/go-test/deep" "github.com/rancher/k3d/v4/pkg/runtimes" "github.com/rancher/k3d/v4/pkg/types" k3d "github.com/rancher/k3d/v4/pkg/types" log "github.com/sirupsen/logrus" - "sigs.k8s.io/yaml" + "gopkg.in/yaml.v2" ) // UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster @@ -47,25 +48,34 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return err } - // find the LoadBalancer for the target cluster - serverNodesList := []string{} - var loadbalancer *k3d.Node - for _, node := range cluster.Nodes { - if node.Role == k3d.LoadBalancerRole { // get the loadbalancer we want to update - loadbalancer = node - } else if node.Role == k3d.ServerRole { // create a list of server nodes - serverNodesList = append(serverNodesList, node.Name) - } - } - serverNodes := strings.Join(serverNodesList, ",") - if loadbalancer == nil { - return fmt.Errorf("Failed to find loadbalancer for cluster '%s'", cluster.Name) + 
currentConfig, err := GetLoadbalancerConfig(ctx, runtime, cluster) + if err != nil { + return fmt.Errorf("error getting current config from loadbalancer: %w", err) } - log.Debugf("Servers as passed to serverlb: '%s'", serverNodes) + log.Tracef("Current loadbalancer config:\n%+v", currentConfig) - command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload") - if err := runtime.ExecInNode(ctx, loadbalancer, []string{"sh", "-c", command}); err != nil { + newLBConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating new loadbalancer config: %w", err) + } + log.Tracef("New loadbalancer config:\n%+v", currentConfig) + + if diff := deep.Equal(currentConfig, newLBConfig); diff != nil { + log.Debugf("Updating the loadbalancer with this diff: %+v", diff) + } + + newLbConfigYaml, err := yaml.Marshal(&newLBConfig) + if err != nil { + return fmt.Errorf("error marshalling the new loadbalancer config: %w", err) + } + log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) + if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { + return fmt.Errorf("error writing new loadbalancer config to container: %w", err) + } + + command := "confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug && nginx -s reload" + if err := runtime.ExecInNode(ctx, cluster.ServerLoadBalancer, []string{"sh", "-c", command}); err != nil { if strings.Contains(err.Error(), "host not found in upstream") { log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error()) return nil @@ -76,7 +86,9 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return nil } -func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*types.LoadbalancerConfig, error) { +func GetLoadbalancerConfig(ctx 
context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (types.LoadbalancerConfig, error) { + + var cfg k3d.LoadbalancerConfig if cluster.ServerLoadBalancer == nil { for _, node := range cluster.Nodes { @@ -84,7 +96,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste var err error cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node) if err != nil { - return nil, err + return cfg, err } } } @@ -92,23 +104,22 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer) if err != nil { - return &k3d.LoadbalancerConfig{}, err + return cfg, err } defer reader.Close() file, err := ioutil.ReadAll(reader) if err != nil { - return nil, err + return cfg, err } file = bytes.Trim(file[512:], "\x00") // trim control characters, etc. - currentConfig := &types.LoadbalancerConfig{} - if err := yaml.Unmarshal(file, currentConfig); err != nil { - return nil, err + if err := yaml.Unmarshal(file, &cfg); err != nil { + return cfg, err } - return currentConfig, nil + return cfg, nil } func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, error) { From 2a4c8910e886ce538aef017fe3775a02315ab486 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 20:14:31 +0200 Subject: [PATCH 18/45] proxy: use new confd release with fixed file watcher --- proxy/Dockerfile | 2 +- proxy/Makefile | 2 +- proxy/conf.d/nginx.toml | 4 +++- proxy/nginx-proxy | 18 ++++++++++++++---- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 105e89c4..7885f15e 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -1,6 +1,6 @@ FROM nginx:1.19-alpine ARG CONFD_REPO=iwilltry42/confd -ARG CONFD_VERSION=0.16.1 +ARG CONFD_VERSION=0.17.0-rc.0 ARG OS=linux ARG ARCH=amd64 RUN echo "Building for '${OS}/${ARCH}'..." 
\ diff --git a/proxy/Makefile b/proxy/Makefile index 6555f6ae..53b943de 100644 --- a/proxy/Makefile +++ b/proxy/Makefile @@ -1,5 +1,5 @@ .PHONY: test test: - docker build . -t rancher/k3d-proxy:dev + docker build . -t rancher/k3d-proxy:dev --no-cache docker run --rm -v $(shell pwd)/test/portmap.yaml:/etc/confd/values.yaml rancher/k3d-proxy:dev \ No newline at end of file diff --git a/proxy/conf.d/nginx.toml b/proxy/conf.d/nginx.toml index 96805409..a82534b3 100644 --- a/proxy/conf.d/nginx.toml +++ b/proxy/conf.d/nginx.toml @@ -4,4 +4,6 @@ dest = "/etc/nginx/nginx.conf" keys = [ "ports", "settings" -] \ No newline at end of file +] +check_cmd = "/usr/sbin/nginx -T -c {{.src}}" +reload_cmd = "/usr/sbin/nginx -s reload" diff --git a/proxy/nginx-proxy b/proxy/nginx-proxy index 1a1cd53b..33fb5f87 100755 --- a/proxy/nginx-proxy +++ b/proxy/nginx-proxy @@ -1,14 +1,24 @@ #!/bin/sh -# Run confd set -e -confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug -set +e + +# Config Options +INIT_CONFIG_MAX_RETRIES=3 + +# Run confd +for i in $(seq 1 $INIT_CONFIG_MAX_RETRIES); do + echo "[$(date -Iseconds)] creating initial nginx config (try $i/$INIT_CONFIG_MAX_RETRIES)" + confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug -sync-only + sleep 2 +done # Output Configuration echo "===== Initial nginx configuration =====" -cat /etc/nginx/nginx.conf +nginx -T -c /etc/nginx/nginx.conf echo "=======================================" +# Start confd in watch mode (every second) +confd -watch -backend file -file /etc/confd/values.yaml -log-level debug & + # Start nginx nginx -g 'daemon off;' \ No newline at end of file From 2b2041f028dc601d93d11748ebdef56cded6efe1 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 20:34:55 +0200 Subject: [PATCH 19/45] loadbalancer: use auto-reload for confd (file watcher) --- pkg/client/loadbalancer.go | 10 +--------- proxy/Dockerfile | 1 + 2 files changed, 2 insertions(+), 9 deletions(-) diff --git 
a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 0ed93bd1..efac0d33 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -26,7 +26,6 @@ import ( "context" "fmt" "io/ioutil" - "strings" "github.com/docker/go-connections/nat" "github.com/go-test/deep" @@ -74,14 +73,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } - command := "confd -onetime -backend file -file /etc/confd/values.yaml -log-level debug && nginx -s reload" - if err := runtime.ExecInNode(ctx, cluster.ServerLoadBalancer, []string{"sh", "-c", command}); err != nil { - if strings.Contains(err.Error(), "host not found in upstream") { - log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error()) - return nil - } - return err - } + // TODO: check if loadbalancer is running fine after auto-applying the change return nil } diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 7885f15e..3ffee0a3 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -1,4 +1,5 @@ FROM nginx:1.19-alpine +# TODO:_ consider switching to https://github.com/abtreece/confd to not maintain a custom fork anymore ARG CONFD_REPO=iwilltry42/confd ARG CONFD_VERSION=0.17.0-rc.0 ARG OS=linux From a4dc34531a0ad8e9dd2941ba70c39fb3657aee75 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 21 Jun 2021 21:00:26 +0200 Subject: [PATCH 20/45] node/edit: use new loadbalancer config file for ports update --- pkg/client/node.go | 60 ++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/pkg/client/node.go b/pkg/client/node.go index f981d304..a824d4c7 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -34,6 +34,7 @@ import ( "time" copystruct "github.com/mitchellh/copystructure" + "gopkg.in/yaml.v2" "github.com/docker/go-connections/nat" dockerunits 
"github.com/docker/go-units" @@ -691,40 +692,37 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang // --- Loadbalancer specifics --- if result.Role == k3d.LoadBalancerRole { - nodeEditApplyLBSpecifics(ctx, result) + cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: existingNode.RuntimeLabels[k3d.LabelClusterName]}) + if err != nil { + return fmt.Errorf("error updating loadbalancer config: %w", err) + } + cluster.ServerLoadBalancer = result + lbConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating loadbalancer config: %v", err) + } + + // prepare to write config to lb container + configyaml, err := yaml.Marshal(lbConfig) + if err != nil { + return err + } + + writeLbConfigAction := k3d.NodeHook{ + Stage: k3d.LifecycleStagePreStart, + Action: actions.WriteFileAction{ + Runtime: runtime, + Dest: k3d.DefaultLoadbalancerConfigPath, + Mode: 0744, + Content: configyaml, + }, + } + + result.HookActions = append(result.HookActions, writeLbConfigAction) } // replace existing node return NodeReplace(ctx, runtime, existingNode, result) - -} - -func nodeEditApplyLBSpecifics(ctx context.Context, lbNode *k3d.Node) { - tcp_ports := []string{} - udp_ports := []string{} - for index, env := range lbNode.Env { - if strings.HasPrefix(env, "PORTS=") || strings.HasPrefix(env, "UDP_PORTS=") { - // Remove matching environment variable from slice (does not preserve order) - lbNode.Env[index] = lbNode.Env[len(lbNode.Env)-1] // copy last element to index of matching env - lbNode.Env[len(lbNode.Env)-1] = "" // remove last element - lbNode.Env = lbNode.Env[:len(lbNode.Env)-1] // truncate - } - } - - for port := range lbNode.Ports { - switch port.Proto() { - case "tcp": - tcp_ports = append(tcp_ports, port.Port()) - break - case "udp": - udp_ports = append(udp_ports, port.Port()) - break - default: - log.Warnf("Unknown port protocol %s for port %s", port.Proto(), port.Port()) - } - } - lbNode.Env = 
append(lbNode.Env, fmt.Sprintf("PORTS=%s", strings.Join(tcp_ports, ","))) - lbNode.Env = append(lbNode.Env, fmt.Sprintf("UDP_PORTS=%s", strings.Join(udp_ports, ","))) } func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.Node) error { @@ -755,7 +753,7 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No // start new node log.Infof("Starting new node %s...", new.Name) - if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { + if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil { if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err) } From 4a84874a86a81e7866bb6991b00f47063e1bda31 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 19:00:33 +0200 Subject: [PATCH 21/45] nodeWaitForLogMessage: log found target line when on >= trace level logging --- pkg/client/node.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/client/node.go b/pkg/client/node.go index a824d4c7..2ba1c0b3 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -604,6 +604,14 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node * } // check if we can find the specified line in the log if nRead > 0 && strings.Contains(output, message) { + if log.GetLevel() >= log.TraceLevel { + temp := strings.Split(output, "\n") + for _, l := range temp { + if strings.Contains(l, message) { + log.Tracef("Found target log line: `%s`", l) + } + } + } break } From 1a68ae037221f9f0cbda9b0da3e1446e1a531c0c Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 19:46:44 +0200 Subject: [PATCH 22/45] updatelbconfig: check for log output to see if the update succeeded and give proper info --- pkg/client/loadbalancer.go | 32 +++++++++++++++++++++++++++++++- pkg/client/node.go | 15 ++++++++------- 2 
files changed, 39 insertions(+), 8 deletions(-) diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index efac0d33..ec43d9fd 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -24,8 +24,10 @@ package client import ( "bytes" "context" + "errors" "fmt" "io/ioutil" + "time" "github.com/docker/go-connections/nat" "github.com/go-test/deep" @@ -36,6 +38,11 @@ import ( "gopkg.in/yaml.v2" ) +var ( + LBConfigErrHostNotFound = errors.New("lbconfig: host not found") + LBConfigErrFailedTest = errors.New("lbconfig: failed to test") +) + // UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) error { @@ -69,11 +76,34 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return fmt.Errorf("error marshalling the new loadbalancer config: %w", err) } log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) + startTime := time.Now().Truncate(time.Second).UTC() + log.Debugf("timestamp: %s", startTime.Format("2006-01-02T15:04:05.999999999Z")) if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } - // TODO: check if loadbalancer is running fine after auto-applying the change + successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) + defer successCtxCancel() + err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) + defer failureCtxCancel() + err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer, "host 
not found in upstream", startTime) + if err != nil { + log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err) + return LBConfigErrFailedTest + } else { + log.Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.") + return LBConfigErrHostNotFound + } + } else { + log.Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err) + return LBConfigErrFailedTest + } + } + + time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits return nil } diff --git a/pkg/client/node.go b/pkg/client/node.go index 2ba1c0b3..8e644b79 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -208,8 +208,9 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N // if it's a server node, then update the loadbalancer configuration if node.Role == k3d.ServerRole { if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { - log.Errorln("Failed to update cluster loadbalancer") - return err + if !errors.Is(err, LBConfigErrHostNotFound) { + return fmt.Errorf("error updating loadbalancer: %w", err) + } } } @@ -496,8 +497,9 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o // if it's a server node, then update the loadbalancer configuration if node.Role == k3d.ServerRole { if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { - log.Errorln("Failed to update cluster loadbalancer") - return err + if !errors.Is(err, LBConfigErrHostNotFound) { + return fmt.Errorf("Failed to update cluster loadbalancer: %w", err) + } } } } @@ -577,7 +579,7 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node * if ok { log.Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now()) 
} - return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s", message, node.Name) + return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err()) } return ctx.Err() default: @@ -589,8 +591,7 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node * if out != nil { out.Close() } - log.Errorf("Failed waiting for log message '%s' from node '%s'", message, node.Name) - return err + return fmt.Errorf("Failed waiting for log message '%s' from node '%s': %w", message, node.Name, err) } defer out.Close() From c2ad051f4b9af44641b0fa221d0e423130958d39 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 20:15:09 +0200 Subject: [PATCH 23/45] update changelog --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f9381cb..f2c0fb9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Changelog +## v5.0.0 + +### Fixes + +- cleaned up and properly sorted the sanitization of existing resources used to create new nodes (#638) + +### Features & Enhancements + +- new command: `k3d node edit` to edit existing nodes (#615) + - currently only allows `k3d node edit NODE --port-add HOSTPORT:CONTAINERPORT` for the serverlb/loadbalancer to add new ports + - pkg: new `NodeEdit` function +- new (hidden) command: `k3d debug` with some options for debugging k3d resources (#638) + - e.g. 
`k3d debug loadbalancer get-config` to get the current loadbalancer configuration +- loadbalancer / k3d-proxy (#638) + - updated fork of `confd` to make usage of the file backend including a file watcher for auto-reloads + - this also checks the config before applying it, so the lb doesn't crash on a faulty config + - updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards +- helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638) + +### Misc + +- tests/e2e: timeouts everywhere to avoid killing DroneCI (#638) + ## v4.4.7 ### Features / Enhancements From e063405b020d85c1a77683f1b3176aa758c5425c Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 20:56:40 +0200 Subject: [PATCH 24/45] some really final log output after creating/deleting nodes --- cmd/node/nodeCreate.go | 1 + cmd/node/nodeDelete.go | 1 + pkg/client/node.go | 1 + 3 files changed, 3 insertions(+) diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index 8169170c..3ded6faa 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -55,6 +55,7 @@ func NewCmdNodeCreate() *cobra.Command { log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name) log.Fatalln(err) } + log.Infof("Successfully created %d node(s)!", len(nodes)) }, } diff --git a/cmd/node/nodeDelete.go b/cmd/node/nodeDelete.go index 91c50764..ad1bb72b 100644 --- a/cmd/node/nodeDelete.go +++ b/cmd/node/nodeDelete.go @@ -59,6 +59,7 @@ func NewCmdNodeDelete() *cobra.Command { log.Fatalln(err) } } + log.Infof("Successfully deleted %d node(s)!", len(nodes)) } }, } diff --git a/pkg/client/node.go b/pkg/client/node.go index 8e644b79..ef0dce2f 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -207,6 +207,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N // if it's a server node, then update the loadbalancer configuration if node.Role == k3d.ServerRole { + 
log.Infoln("Updating loadbalancer config to include new server node(s)") if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil { if !errors.Is(err, LBConfigErrHostNotFound) { return fmt.Errorf("error updating loadbalancer: %w", err) From 7bcb1730c6ae29ae18354d003f15f198ae9b9712 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 21:08:57 +0200 Subject: [PATCH 25/45] nodeCreate: remove dead code and parallelize adding nodes to the cluster completely --- CHANGELOG.md | 3 +++ cmd/node/nodeCreate.go | 2 +- pkg/client/loadbalancer.go | 2 +- pkg/client/node.go | 23 +++++------------------ 4 files changed, 10 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2c0fb9b..dbe5e8d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,10 +18,13 @@ - this also checks the config before applying it, so the lb doesn't crash on a faulty config - updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards - helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638) +- concurrently add new nodes to an existing cluster (remove some dumb code) (#640) + - `--wait` is now the default for `k3d node create` ### Misc - tests/e2e: timeouts everywhere to avoid killing DroneCI (#638) +- logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640) ## v4.4.7 diff --git a/cmd/node/nodeCreate.go b/cmd/node/nodeCreate.go index 3ded6faa..76151033 100644 --- a/cmd/node/nodeCreate.go +++ b/cmd/node/nodeCreate.go @@ -73,7 +73,7 @@ func NewCmdNodeCreate() *cobra.Command { cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)") cmd.Flags().String("memory", "", "Memory limit imposed on the node [From docker]") - cmd.Flags().BoolVar(&createNodeOpts.Wait, 
"wait", false, "Wait for the node(s) to be ready before returning.") + cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", true, "Wait for the node(s) to be ready before returning.") cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.") cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"") diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index ec43d9fd..84843c0a 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -77,7 +77,6 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu } log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) startTime := time.Now().Truncate(time.Second).UTC() - log.Debugf("timestamp: %s", startTime.Format("2006-01-02T15:04:05.999999999Z")) if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } @@ -102,6 +101,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return LBConfigErrFailedTest } } + log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Name) time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits diff --git a/pkg/client/node.go b/pkg/client/node.go index ef0dce2f..f0e00482 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -228,26 +228,13 @@ func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes nodeWaitGroup, ctx := errgroup.WithContext(ctx) for _, node := range nodes { - if err := NodeAddToCluster(ctx, runtime, node, cluster, createNodeOpts); err != nil { - return err - } - if createNodeOpts.Wait { - currentNode := node - nodeWaitGroup.Go(func() error { - log.Debugf("Starting to 
wait for node '%s'", currentNode.Name) - readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role] - if readyLogMessage != "" { - return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{}) - } - log.Warnf("NodeAddToClusterMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name) - return nil - }) - } + currentNode := node + nodeWaitGroup.Go(func() error { + return NodeAddToCluster(ctx, runtime, currentNode, cluster, createNodeOpts) + }) } if err := nodeWaitGroup.Wait(); err != nil { - log.Errorln("Failed to bring up all nodes in time. Check the logs:") - log.Errorf(">>> %+v", err) - return fmt.Errorf("Failed to add nodes") + return fmt.Errorf("Failed to add one or more nodes: %w", err) } return nil From d593530f76b2bda8040c31bb19abaad7c5f9e150 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 22 Jun 2021 21:49:16 +0200 Subject: [PATCH 26/45] update changelog to include already merged prs --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dbe5e8d7..cd5a951d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,11 +20,29 @@ - helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638) - concurrently add new nodes to an existing cluster (remove some dumb code) (#640) - `--wait` is now the default for `k3d node create` +- normalized flag usage for k3s and runtime (#598, @ejose19) + - rename `k3d cluster create --label` to `k3d cluster create --runtime-label` (as it's labelling the node on runtime level, e.g. 
docker) + - config option moved to `options.runtime.labels` + - add `k3d cluster create --k3s-node-label` to add Kubernetes node labels via k3s flag (#584, @developer-guy, @ejose, @dentrax) + - new config option `options.k3s.nodeLabels` + - the same for `k3d node create` +- improved config file handling (#605) + - new version `v1alpha3` + - warning when using outdated version + - validation dynamically based on provided config apiVersion + - new default for `k3d config init` + - new command `k3d config migrate INPUT [OUTPUT]` to migrate config files between versions + - currently supported migration `v1alpha2` -> `v1alpha3` + - pkg: new `Config` interface type to support new generic `FromViper` config file parsing +- changed flags `--k3s-server-arg` & `--k3s-agent-arg` into `--k3s-arg` with nodefilter support (#605) + - new config path `options.k3s.extraArgs` ### Misc - tests/e2e: timeouts everywhere to avoid killing DroneCI (#638) - logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640) +- tests/e2e: add tests for v1alpha2 to v1alpha3 migration +- docs: use v1alpha3 config version ## v4.4.7 From f6c3bd2e8b39ccea3324149a16cc4c850e8ec2d3 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Thu, 24 Jun 2021 10:53:39 +0200 Subject: [PATCH 27/45] clusterCreate: use tempfile with expanded env vars as viper input file --- CHANGELOG.md | 1 + cmd/cluster/clusterCreate.go | 25 +++++++++++++++++++++++-- tests/assets/config_test_simple.yaml | 2 +- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd5a951d..efd64333 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - pkg: new `Config` interface type to support new generic `FromViper` config file parsing - changed flags `--k3s-server-arg` & `--k3s-agent-arg` into `--k3s-arg` with nodefilter support (#605) - new config path `options.k3s.extraArgs` +- config file: environment 
variables (`$VAR`, `${VAR}` will be expanded unconditionally) (#643) ### Misc diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index 89a2fbae..7e977f5a 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -24,7 +24,9 @@ package cluster import ( "fmt" + "io/ioutil" "os" + "path/filepath" "runtime" "strings" "time" @@ -72,12 +74,31 @@ func initConfig() { // Set config file, if specified if configFile != "" { - cfgViper.SetConfigFile(configFile) if _, err := os.Stat(configFile); err != nil { log.Fatalf("Failed to stat config file %s: %+v", configFile, err) } + // create temporary file to expand environment variables in the config without writing that back to the original file + // we're doing it here, because this happens just before absolutely all other processing + tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile))) + if err != nil { + log.Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err) + } + defer tmpfile.Close() + + originalcontent, err := ioutil.ReadFile(configFile) + if err != nil { + log.Fatalf("error reading config file %s: %v", configFile, err) + } + expandedcontent := os.ExpandEnv(string(originalcontent)) + if _, err := tmpfile.WriteString(expandedcontent); err != nil { + log.Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err) + } + + // use temp file with expanded variables + cfgViper.SetConfigFile(tmpfile.Name()) + // try to read config into memory (viper map structure) if err := cfgViper.ReadInConfig(); err != nil { if _, ok := err.(viper.ConfigFileNotFoundError); ok { @@ -96,7 +117,7 @@ func initConfig() { log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err) } - log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) + 
log.Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind"))) } if log.GetLevel() >= log.DebugLevel { c, _ := yaml.Marshal(cfgViper.AllSettings()) diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index 75588924..0d4440fa 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -8,7 +8,7 @@ kubeAPI: hostPort: "6446" image: rancher/k3s:latest volumes: - - volume: /my/path:/some/path + - volume: $HOME:/some/path nodeFilters: - all ports: From 2092ceaaa9f6d871ef40617db14e12a374613146 Mon Sep 17 00:00:00 2001 From: Thorsten Klein Date: Wed, 16 Jun 2021 15:59:43 +0200 Subject: [PATCH 28/45] [FEATURE] add ability to add ports to an existing loadbalancer (#615) --- go.mod | 1 - go.sum | 1 - pkg/client/node.go | 7 ++ vendor/golang.org/x/sync/AUTHORS | 3 - vendor/golang.org/x/sync/CONTRIBUTORS | 3 - vendor/golang.org/x/sync/LICENSE | 27 -------- vendor/golang.org/x/sync/PATENTS | 22 ------- vendor/golang.org/x/sync/errgroup/errgroup.go | 66 ------------------- vendor/modules.txt | 3 - 9 files changed, 7 insertions(+), 126 deletions(-) delete mode 100644 vendor/golang.org/x/sync/AUTHORS delete mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sync/LICENSE delete mode 100644 vendor/golang.org/x/sync/PATENTS delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go diff --git a/go.mod b/go.mod index 6893fcd8..b0d20808 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,6 @@ require ( github.com/spf13/viper v1.8.1 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect golang.org/x/text v0.3.6 // indirect gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 879f84b4..6f0cbba9 100644 --- 
a/go.sum +++ b/go.sum @@ -686,7 +686,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/pkg/client/node.go b/pkg/client/node.go index f0e00482..3a895ed7 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -34,7 +34,10 @@ import ( "time" copystruct "github.com/mitchellh/copystructure" +<<<<<<< HEAD "gopkg.in/yaml.v2" +======= +>>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) "github.com/docker/go-connections/nat" dockerunits "github.com/docker/go-units" @@ -750,7 +753,11 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No // start new node log.Infof("Starting new node %s...", new.Name) +<<<<<<< HEAD if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil { +======= + if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { +>>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { return fmt.Errorf("Failed to start new node. 
Also failed to rollback: %+v", err) } diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 9857fe53..00000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -package errgroup - -import ( - "context" - "sync" -) - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid and does not cancel on error. -type Group struct { - cancel func() - - wg sync.WaitGroup - - errOnce sync.Once - err error -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. 
-func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel() - } - return g.err -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (g *Group) Go(f func() error) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 54f233ab..d499b5ef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -233,9 +233,6 @@ golang.org/x/net/proxy # golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c -## explicit -golang.org/x/sync/errgroup # golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 ## explicit golang.org/x/sys/execabs From 8e29ad4f1a999bbb62a7118f449d77c77b843bf1 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Mon, 28 Jun 2021 11:16:00 +0200 Subject: [PATCH 29/45] add some more portmapping work --- cmd/cluster/clusterCreate.go | 2 +- pkg/client/node.go | 14 +----- pkg/types/loadbalancer.go | 94 ++++++++++++++++++++++++++++++++++++ pkg/types/node.go | 39 +++++++++++++++ pkg/types/types.go | 42 ---------------- pkg/util/filter.go | 23 ++++++--- 6 files changed, 152 insertions(+), 62 deletions(-) create mode 100644 pkg/types/loadbalancer.go create mode 100644 pkg/types/node.go diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index 7e977f5a..c238a736 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -301,7 +301,7 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`") _ = 
ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume")) - cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") + cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") _ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port")) cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server[0]\"`") diff --git a/pkg/client/node.go b/pkg/client/node.go index 3a895ed7..97a2212e 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -386,19 +386,7 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c /* global node configuration (applies for any node role) */ // ### Labels ### - labels := make(map[string]string) - for k, v := range k3d.DefaultRuntimeLabels { - labels[k] = v - } - for k, v := range k3d.DefaultRuntimeLabelsVar { - labels[k] = v - } - for k, v := range node.RuntimeLabels { - labels[k] = v - } - node.RuntimeLabels = labels - // second most important: the node role label - node.RuntimeLabels[k3d.LabelRole] = string(node.Role) + node.FillRuntimeLabels() for k, v := range node.K3sNodeLabels { node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v)) diff --git a/pkg/types/loadbalancer.go b/pkg/types/loadbalancer.go new file mode 100644 index 00000000..4145440c --- /dev/null +++ b/pkg/types/loadbalancer.go @@ -0,0 +1,94 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to 
any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package types + +/* DESCRIPTION + * The Loadbalancer is a customized NGINX container running side-by-side with the cluster, NOT INSIDE IT. + * It is used to do plain proxying of tcp/udp ports to the k3d node containers. + * One advantage of this approach is, that we can add new ports while the cluster is still running by re-creating + * the loadbalancer and adding the new port config in the NGINX config. As the loadbalancer doesn't hold any state + * (apart from the config file), it can easily be re-created in just a few seconds. 
+ */ + +/* + * Loadbalancer Definition + */ + +type Loadbalancer struct { + Node Node `mapstructure:",squash" yaml:",inline"` // the underlying node + Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration +} + +func NewLoadbalancer() *Loadbalancer { + return &Loadbalancer{ + Node: Node{ + Role: LoadBalancerRole, + Image: GetLoadbalancerImage(), + }, + } +} + +/* + * Loadbalancer Configuration + */ + +/* LoadbalancerConfig defines the coarse file structure to configure the k3d-proxy + * Example: + * ports: + * 1234.tcp: + * - k3d-k3s-default-server-0 + * - k3d-k3s-default-server-1 + * 4321.udp: + * - k3d-k3s-default-agent-0 + * - k3d-k3s-default-agent-1 + */ +type LoadbalancerConfig struct { + Ports map[string][]string `yaml:"ports"` + Settings LoadBalancerSettings `yaml:"settings"` +} + +type LoadBalancerSettings struct { + WorkerProcesses int `yaml:"workerProcesses"` +} + +const ( + DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml" + DefaultLoadbalancerWorkerProcesses = 1024 +) + +type LoadbalancerCreateOpts struct { + Labels map[string]string +} + +/* + * Helper Functions + */ + +// HasLoadBalancer returns true if cluster has a loadbalancer node +func (c *Cluster) HasLoadBalancer() bool { + for _, node := range c.Nodes { + if node.Role == LoadBalancerRole { + return true + } + } + return false +} diff --git a/pkg/types/node.go b/pkg/types/node.go new file mode 100644 index 00000000..d1a6c55b --- /dev/null +++ b/pkg/types/node.go @@ -0,0 +1,39 @@ +/* +Copyright © 2020-2021 The k3d Author(s) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + 
+The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package types + +func (node *Node) FillRuntimeLabels() { + labels := make(map[string]string) + for k, v := range DefaultRuntimeLabels { + labels[k] = v + } + for k, v := range DefaultRuntimeLabelsVar { + labels[k] = v + } + for k, v := range node.RuntimeLabels { + labels[k] = v + } + node.RuntimeLabels = labels + // second most important: the node role label + node.RuntimeLabels[LabelRole] = string(node.Role) + +} diff --git a/pkg/types/types.go b/pkg/types/types.go index 296d1a84..caff5aad 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -297,16 +297,6 @@ func (c *Cluster) AgentCountRunning() (int, int) { return agentCount, agentsRunning } -// HasLoadBalancer returns true if cluster has a loadbalancer node -func (c *Cluster) HasLoadBalancer() bool { - for _, node := range c.Nodes { - if node.Role == LoadBalancerRole { - return true - } - } - return false -} - type NodeIP struct { IP netaddr.IP Static bool @@ -413,35 +403,3 @@ type RegistryExternal struct { Host string `yaml:"host" json:"host"` Port string `yaml:"port" json:"port"` } - -/* - * Loadbalancer - */ - -/* LoadbalancerConfig defines the coarse file structure to configure the k3d-proxy - * Example: - * ports: - * 1234.tcp: - * - k3d-k3s-default-server-0 - * - k3d-k3s-default-server-1 - * 4321.udp: - * - k3d-k3s-default-agent-0 - * - k3d-k3s-default-agent-1 - */ -type LoadbalancerConfig struct { - Ports 
map[string][]string `yaml:"ports"` - Settings LoadBalancerSettings `yaml:"settings"` -} - -type LoadBalancerSettings struct { - WorkerProcesses int `yaml:"workerProcesses"` -} - -const ( - DefaultLoadbalancerConfigPath = "/etc/confd/values.yaml" - DefaultLoadbalancerWorkerProcesses = 1024 -) - -type LoadbalancerCreateOpts struct { - Labels map[string]string -} diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 99bcab9f..d9f80a7a 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -31,15 +31,26 @@ import ( log "github.com/sirupsen/logrus" ) +type NodeFilterSuffix string + +const ( + NodeFilterSuffixNone NodeFilterSuffix = "none" + NodeFilterMapKeyAll = "all" +) + // Regexp pattern to match node filters -var filterRegexp = regexp.MustCompile(`^(?Pserver|agent|loadbalancer|all)(?P\[(?P(?P(\d+,?)+)|(?P\d*:\d*)|(?P\*))\])?$`) +var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:[[:alpha:]]+)?$`) // FilterNodes takes a string filter to return a filtered list of nodes -func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { +func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, error) { + + result := map[string][]*k3d.Node{ + NodeFilterMapKeyAll: nodes, + } if len(filters) == 0 || len(filters[0]) == 0 { log.Warnln("No node filter specified") - return nodes, nil + return result, nil } // map roles to subsets @@ -64,21 +75,21 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { for _, filter := range filters { // match regex with capturing groups - match := filterRegexp.FindStringSubmatch(filter) + match := NodeFilterRegexp.FindStringSubmatch(filter) if len(match) == 0 { return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", filter) } // map capturing group names to submatches - submatches := MapSubexpNames(filterRegexp.SubexpNames(), match) + submatches := 
MapSubexpNames(NodeFilterRegexp.SubexpNames(), match) // if one of the filters is 'all', we only return this and drop all others if submatches["group"] == "all" { if len(filters) > 1 { log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters) } - return nodes, nil + return result, nil } // Choose the group of nodes to operate on From 91db3f647c86b52495595f1dd1b6c2d609c071ff Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 30 Jun 2021 08:29:13 +0200 Subject: [PATCH 30/45] changes when creating clusters + new nodefilter syntax - generate node names when transforming from simple to cluster config - ClusterCreate(clusterconfig) should have a ready made config and not generate variables - ClusterCreate() only prep LB if not already present (to be removed) - cluster struct: serverloadbalancer is now of type LoadBalancer (Node + Config) - use new nodefilter syntax with 'id:index:suffix' instead of 'id[index]' everywhere - use suffix when creating the LB --- pkg/client/cluster.go | 54 ++++++------ pkg/client/loadbalancer.go | 31 ++++--- pkg/client/node.go | 2 +- pkg/config/config_test.go | 6 +- .../test_assets/config_test_simple.yaml | 6 +- .../config_test_simple_invalid_servers.yaml | 6 +- pkg/config/transform.go | 83 +++++++++++++++---- pkg/types/loadbalancer.go | 5 +- pkg/types/types.go | 2 +- pkg/util/filter.go | 56 +++++++++++-- tests/assets/config_test_simple.yaml | 6 +- ...config_test_simple_migration_v1alpha2.yaml | 2 +- ...config_test_simple_migration_v1alpha3.yaml | 4 +- tests/test_basic.sh | 2 +- tests/test_config_with_overrides.sh | 2 +- 15 files changed, 182 insertions(+), 85 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 27225ed9..6ebb82b8 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -372,12 +372,12 @@ ClusterCreatOpts: clusterCreateOpts.GlobalLabels[k3d.LabelClusterName] = cluster.Name // agent defaults (per cluster) - // connection url is always the name of the first server node 
(index 0) - connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort) + // connection url is always the name of the first server node (index 0) // TODO: change this to the server loadbalancer + connectionURL := fmt.Sprintf("https://%s:%s", GenerateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort) clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token)) - nodeSetup := func(node *k3d.Node, suffix int) error { + nodeSetup := func(node *k3d.Node) error { // cluster specific settings if node.RuntimeLabels == nil { node.RuntimeLabels = make(map[string]string) // TODO: maybe create an init function? @@ -417,7 +417,6 @@ ClusterCreatOpts: node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL)) } - node.Name = generateNodeName(cluster.Name, node.Role, suffix) node.Networks = []string{cluster.Network.Name} node.Restart = true node.GPURequest = clusterCreateOpts.GPURequest @@ -437,8 +436,6 @@ ClusterCreatOpts: // used for node suffices serverCount := 0 - agentCount := 0 - suffix := 0 // create init node first if cluster.InitNode != nil { @@ -457,7 +454,7 @@ ClusterCreatOpts: cluster.InitNode.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} } - if err := nodeSetup(cluster.InitNode, serverCount); err != nil { + if err := nodeSetup(cluster.InitNode); err != nil { return err } serverCount++ @@ -481,17 +478,11 @@ ClusterCreatOpts: time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering - // name suffix - suffix = serverCount serverCount++ - } else if node.Role == k3d.AgentRole { - // name suffix - suffix = agentCount - agentCount++ } if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole { - if err := nodeSetup(node, suffix); err != nil { + if err := nodeSetup(node); err != nil { return err } 
} @@ -499,7 +490,7 @@ ClusterCreatOpts: // WARN, if there are exactly two server nodes: that means we're using etcd, but don't have fault tolerance if serverCount == 2 { - log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve quorum & fault tolerance") + log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance") } /* @@ -507,19 +498,26 @@ ClusterCreatOpts: */ // *** ServerLoadBalancer *** if !clusterCreateOpts.DisableLoadBalancer { - lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) - if err != nil { - return err + if cluster.ServerLoadBalancer == nil { + lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels}) + if err != nil { + return err + } + cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback } - cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback - lbConfig, err := LoadbalancerGenerateConfig(cluster) - if err != nil { - return fmt.Errorf("error generating loadbalancer config: %v", err) + if len(cluster.ServerLoadBalancer.Config.Ports) == 0 { + lbConfig, err := LoadbalancerGenerateConfig(cluster) + if err != nil { + return fmt.Errorf("error generating loadbalancer config: %v", err) + } + cluster.ServerLoadBalancer.Config = lbConfig } + cluster.ServerLoadBalancer.Node.RuntimeLabels = clusterCreateOpts.GlobalLabels + // prepare to write config to lb container - configyaml, err := yaml.Marshal(lbConfig) + configyaml, err := yaml.Marshal(cluster.ServerLoadBalancer.Config) if err != nil { return err } @@ -534,13 +532,13 @@ ClusterCreatOpts: }, } - lbNode.HookActions = append(lbNode.HookActions, writeLbConfigAction) + cluster.ServerLoadBalancer.Node.HookActions = 
append(cluster.ServerLoadBalancer.Node.HookActions, writeLbConfigAction) - log.Infof("Creating LoadBalancer '%s'", lbNode.Name) - if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil { + log.Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name) + if err := NodeCreate(ctx, runtime, cluster.ServerLoadBalancer.Node, k3d.NodeCreateOpts{}); err != nil { return fmt.Errorf("error creating loadbalancer: %v", err) } - log.Debugf("Created loadbalancer '%s'", lbNode.Name) + log.Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name) return err } @@ -795,7 +793,7 @@ func GenerateClusterToken() string { return util.GenerateRandomString(20) } -func generateNodeName(cluster string, role k3d.Role, suffix int) string { +func GenerateNodeName(cluster string, role k3d.Role, suffix int) string { return fmt.Sprintf("%s-%s-%s-%d", k3d.DefaultObjectNamePrefix, cluster, role, suffix) } diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index 84843c0a..dc37d5a6 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -77,18 +77,18 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu } log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml)) startTime := time.Now().Truncate(time.Second).UTC() - if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil { + if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer.Node); err != nil { return fmt.Errorf("error writing new loadbalancer config to container: %w", err) } successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) defer successCtxCancel() - err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime) + err = NodeWaitForLogMessage(successCtx, runtime, 
cluster.ServerLoadBalancer.Node, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime) if err != nil { if errors.Is(err, context.DeadlineExceeded) { failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second)) defer failureCtxCancel() - err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer, "host not found in upstream", startTime) + err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime) if err != nil { log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err) return LBConfigErrFailedTest @@ -101,7 +101,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu return LBConfigErrFailedTest } } - log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Name) + log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name) time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits @@ -116,7 +116,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste for _, node := range cluster.Nodes { if node.Role == types.LoadBalancerRole { var err error - cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node) + cluster.ServerLoadBalancer.Node, err = NodeGet(ctx, runtime, node) if err != nil { return cfg, err } @@ -124,7 +124,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste } } - reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer) + reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node) if err != nil { return cfg, err } @@ -162,31 +162,36 @@ func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, e 
lbConfig.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = servers // generate comma-separated list of extra ports to forward // TODO: no default targets? - for exposedPort := range cluster.ServerLoadBalancer.Ports { + for exposedPort := range cluster.ServerLoadBalancer.Node.Ports { // TODO: catch duplicates here? lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers } // some additional nginx settings - lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers) + lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers) return lbConfig, nil } func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) { + labels := map[string]string{} - if cluster.ServerLoadBalancer.Ports == nil { - cluster.ServerLoadBalancer.Ports = nat.PortMap{} + if opts != nil && opts.Labels == nil && len(opts.Labels) == 0 { + labels = opts.Labels } - cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} + + if cluster.ServerLoadBalancer.Node.Ports == nil { + cluster.ServerLoadBalancer.Node.Ports = nat.PortMap{} + } + cluster.ServerLoadBalancer.Node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} // Create LB as a modified node with loadbalancerRole lbNode := &k3d.Node{ Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), Image: k3d.GetLoadbalancerImage(), - Ports: cluster.ServerLoadBalancer.Ports, + Ports: cluster.ServerLoadBalancer.Node.Ports, Role: k3d.LoadBalancerRole, - RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels + RuntimeLabels: labels, // TODO: createLoadBalancer: add more expressive labels Networks: []string{cluster.Network.Name}, Restart: true, } diff --git a/pkg/client/node.go 
b/pkg/client/node.go index 97a2212e..21946798 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -684,7 +684,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang if err != nil { return fmt.Errorf("error updating loadbalancer config: %w", err) } - cluster.ServerLoadBalancer = result + cluster.ServerLoadBalancer.Node = result lbConfig, err := LoadbalancerGenerateConfig(cluster) if err != nil { return fmt.Errorf("error generating loadbalancer config: %v", err) diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 41d76422..7b0c08da 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -81,13 +81,13 @@ func TestReadSimpleConfig(t *testing.T) { ExtraArgs: []conf.K3sArgWithNodeFilters{ { Arg: "--tls-san=127.0.0.1", - NodeFilters: []string{"server[*]"}, + NodeFilters: []string{"server:*"}, }, }, NodeLabels: []conf.LabelWithNodeFilters{ { Label: "foo=bar", - NodeFilters: []string{"server[0]", "loadbalancer"}, + NodeFilters: []string{"server:0", "loadbalancer"}, }, }, }, @@ -99,7 +99,7 @@ func TestReadSimpleConfig(t *testing.T) { Labels: []conf.LabelWithNodeFilters{ { Label: "foo=bar", - NodeFilters: []string{"server[0]", "loadbalancer"}, + NodeFilters: []string{"server:0", "loadbalancer"}, }, }, }, diff --git a/pkg/config/test_assets/config_test_simple.yaml b/pkg/config/test_assets/config_test_simple.yaml index f8f873cb..9ababa90 100644 --- a/pkg/config/test_assets/config_test_simple.yaml +++ b/pkg/config/test_assets/config_test_simple.yaml @@ -33,11 +33,11 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - "server[*]" + - server:* nodeLabels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer kubeconfig: updateDefaultKubeconfig: true @@ -46,5 +46,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml 
b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml index 7b9bc8a0..6c67602c 100644 --- a/pkg/config/test_assets/config_test_simple_invalid_servers.yaml +++ b/pkg/config/test_assets/config_test_simple_invalid_servers.yaml @@ -33,11 +33,11 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - "server[*]" + - "server:*" nodeLabels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer kubeconfig: updateDefaultKubeconfig: true @@ -46,5 +46,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 59e674d9..0d35677b 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -31,6 +31,7 @@ import ( "github.com/docker/go-connections/nat" cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg + "github.com/rancher/k3d/v4/pkg/client" conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3" "github.com/rancher/k3d/v4/pkg/runtimes" k3d "github.com/rancher/k3d/v4/pkg/types" @@ -102,8 +103,11 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim newCluster.Nodes = []*k3d.Node{} if !simpleConfig.Options.K3dOptions.DisableLoadbalancer { - newCluster.ServerLoadBalancer = &k3d.Node{ - Role: k3d.LoadBalancerRole, + newCluster.ServerLoadBalancer = k3d.NewLoadbalancer() + var err error + newCluster.ServerLoadBalancer.Node, err = client.LoadbalancerPrepare(ctx, runtime, &newCluster, nil) + if err != nil { + return nil, fmt.Errorf("error preparing the loadbalancer: %w", err) } } else { log.Debugln("Disabling the load balancer") @@ -115,6 +119,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim for i := 0; i < simpleConfig.Servers; i++ { serverNode := k3d.Node{ + Name: client.GenerateNodeName(newCluster.Name, k3d.ServerRole, i), Role: k3d.ServerRole, Image: simpleConfig.Image, ServerOpts: k3d.ServerOpts{}, @@ -132,6 +137,7 @@ func 
TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim for i := 0; i < simpleConfig.Agents; i++ { agentNode := k3d.Node{ + Name: client.GenerateNodeName(newCluster.Name, k3d.AgentRole, i), Role: k3d.AgentRole, Image: simpleConfig.Image, Memory: simpleConfig.Options.Runtime.AgentsMemory, @@ -148,7 +154,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim nodeList := newCluster.Nodes if !simpleConfig.Options.K3dOptions.DisableLoadbalancer { nodeCount++ - nodeList = append(nodeList, newCluster.ServerLoadBalancer) + nodeList = append(nodeList, newCluster.ServerLoadBalancer.Node) } for _, volumeWithNodeFilters := range simpleConfig.Volumes { nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters) @@ -167,27 +173,35 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port) } - nodes, err := util.FilterNodes(nodeList, portWithNodeFilters.NodeFilters) + x, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) if err != nil { return nil, err } - for _, node := range nodes { + for suffix, nodes := range x { portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) if err != nil { - return nil, fmt.Errorf("Failed to parse port spec '%s': %+v", portWithNodeFilters.Port, err) + return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) } - if node.Ports == nil { - node.Ports = nat.PortMap{} - } - for _, pm := range portmappings { - if _, exists := node.Ports[pm.Port]; exists { - node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding) - } else { - node.Ports[pm.Port] = []nat.PortBinding{pm.Binding} + if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings + if newCluster.ServerLoadBalancer == nil { + return nil, fmt.Errorf("port-mapping of 
type 'proxy' specified, but loadbalancer is disabled") + } + if err := addPortMappings(newCluster.ServerLoadBalancer.Node, portmappings); err != nil { + return nil, err + } + for _, pm := range portmappings { + loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes) + } + } else if suffix == "direct" { + for _, node := range nodes { + if err := addPortMappings(node, portmappings); err != nil { + return nil, err + } } } } + } // -> K3S NODE LABELS @@ -358,3 +372,44 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return clusterConfig, nil } + +func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error { + + if node.Ports == nil { + node.Ports = nat.PortMap{} + } + for _, pm := range portmappings { + if _, exists := node.Ports[pm.Port]; exists { + node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding) + } else { + node.Ports[pm.Port] = []nat.PortBinding{pm.Binding} + } + } + return nil +} + +func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMapping, nodes []*k3d.Node) error { + portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto()) + nodenames := []string{} + for _, node := range nodes { + nodenames = append(nodenames, node.Name) + } + + // entry for that port doesn't exist yet, so we simply create it with the list of node names + if _, ok := loadbalancer.Config.Ports[portconfig]; !ok { + loadbalancer.Config.Ports[portconfig] = nodenames + return nil + } + +nodenameLoop: + for _, nodename := range nodenames { + for _, existingNames := range loadbalancer.Config.Ports[portconfig] { + if nodename == existingNames { + continue nodenameLoop + } + loadbalancer.Config.Ports[portconfig] = append(loadbalancer.Config.Ports[portconfig], nodename) + } + } + + return nil +} diff --git a/pkg/types/loadbalancer.go b/pkg/types/loadbalancer.go index 4145440c..158fe61b 100644 --- a/pkg/types/loadbalancer.go +++ b/pkg/types/loadbalancer.go @@ -34,16 +34,17 @@ package types */ 
type Loadbalancer struct { - Node Node `mapstructure:",squash" yaml:",inline"` // the underlying node + Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration } func NewLoadbalancer() *Loadbalancer { return &Loadbalancer{ - Node: Node{ + Node: &Node{ Role: LoadBalancerRole, Image: GetLoadbalancerImage(), }, + Config: LoadbalancerConfig{Ports: map[string][]string{}}, } } diff --git a/pkg/types/types.go b/pkg/types/types.go index caff5aad..63092502 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -263,7 +263,7 @@ type Cluster struct { InitNode *Node // init server node ExternalDatastore *ExternalDatastore `yaml:"externalDatastore,omitempty" json:"externalDatastore,omitempty"` KubeAPI *ExposureOpts `yaml:"kubeAPI" json:"kubeAPI,omitempty"` - ServerLoadBalancer *Node `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"` + ServerLoadBalancer *Loadbalancer `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"` ImageVolume string `yaml:"imageVolume" json:"imageVolume,omitempty"` } diff --git a/pkg/util/filter.go b/pkg/util/filter.go index d9f80a7a..3ab76ea8 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -31,26 +31,65 @@ import ( log "github.com/sirupsen/logrus" ) -type NodeFilterSuffix string - const ( - NodeFilterSuffixNone NodeFilterSuffix = "none" - NodeFilterMapKeyAll = "all" + NodeFilterSuffixNone = "nosuffix" + NodeFilterMapKeyAll = "all" ) // Regexp pattern to match node filters var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:[[:alpha:]]+)?$`) -// FilterNodes takes a string filter to return a filtered list of nodes -func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, error) { +// FilterNodesBySuffix properly interprets NodeFilters with suffix +func FilterNodesWithSuffix(nodes 
[]*k3d.Node, nodefilters []string) (map[string][]*k3d.Node, error) { + if len(nodefilters) == 0 || len(nodefilters[0]) == 0 { + return nil, fmt.Errorf("No nodefilters specified") + } result := map[string][]*k3d.Node{ NodeFilterMapKeyAll: nodes, } + for _, nf := range nodefilters { + suffix := NodeFilterSuffixNone + + // match regex with capturing groups + match := NodeFilterRegexp.FindStringSubmatch(nf) + + if len(match) == 0 { + return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", nf) + } + + // map capturing group names to submatches + submatches := MapSubexpNames(NodeFilterRegexp.SubexpNames(), match) + + // get suffix + if sf, ok := submatches["suffix"]; ok && sf != "" { + suffix = sf + } + + result[suffix] = make([]*k3d.Node, 0) // init map for this suffix + + filteredNodes, err := FilterNodes(nodes, []string{nf}) + if err != nil { + return nil, err + } + + log.Tracef("Adding %d nodes for suffix >%s< (filter: %s)", len(filteredNodes), suffix, nf) + + result[suffix] = filteredNodes + } + + return result, nil +} + +// FilterNodes takes a string filter to return a filtered list of nodes +func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { + + log.Tracef("Filtering %d nodes by %s", len(nodes), filters) + if len(filters) == 0 || len(filters[0]) == 0 { log.Warnln("No node filter specified") - return result, nil + return nodes, nil } // map roles to subsets @@ -58,7 +97,6 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e agentNodes := []*k3d.Node{} var serverlb *k3d.Node for _, node := range nodes { - log.Tracef("FilterNodes (%+v): Checking node role %s", filters, node.Role) if node.Role == k3d.ServerRole { serverNodes = append(serverNodes, node) } else if node.Role == k3d.AgentRole { @@ -89,7 +127,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e if len(filters) > 1 { log.Warnf("Node filter 'all' set, but more were specified in 
'%+v'", filters) } - return result, nil + return nodes, nil } // Choose the group of nodes to operate on diff --git a/tests/assets/config_test_simple.yaml b/tests/assets/config_test_simple.yaml index 0d4440fa..67c3fd6d 100755 --- a/tests/assets/config_test_simple.yaml +++ b/tests/assets/config_test_simple.yaml @@ -41,11 +41,11 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - server[*] + - server:* nodeLabels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer kubeconfig: updateDefaultKubeconfig: true @@ -54,5 +54,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/tests/assets/config_test_simple_migration_v1alpha2.yaml b/tests/assets/config_test_simple_migration_v1alpha2.yaml index 4d9f7255..26140674 100755 --- a/tests/assets/config_test_simple_migration_v1alpha2.yaml +++ b/tests/assets/config_test_simple_migration_v1alpha2.yaml @@ -25,7 +25,7 @@ env: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer registries: create: true diff --git a/tests/assets/config_test_simple_migration_v1alpha3.yaml b/tests/assets/config_test_simple_migration_v1alpha3.yaml index 98eccbde..f2203186 100755 --- a/tests/assets/config_test_simple_migration_v1alpha3.yaml +++ b/tests/assets/config_test_simple_migration_v1alpha3.yaml @@ -41,7 +41,7 @@ options: extraArgs: - arg: --tls-san=127.0.0.1 nodeFilters: - - server[*] + - server:* kubeconfig: updateDefaultKubeconfig: true switchCurrentContext: true @@ -49,5 +49,5 @@ options: labels: - label: foo=bar nodeFilters: - - server[0] + - server:0 - loadbalancer diff --git a/tests/test_basic.sh b/tests/test_basic.sh index 5a09aba7..8969c410 100755 --- a/tests/test_basic.sh +++ b/tests/test_basic.sh @@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh" export CURRENT_STAGE="Test | basic" info "Creating two clusters..." 
-$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server[0]' || failed "could not create cluster c1" +$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server:0' || failed "could not create cluster c1" $EXE cluster create c2 --wait --timeout 60s || failed "could not create cluster c2" info "Checking that we can get both clusters..." diff --git a/tests/test_config_with_overrides.sh b/tests/test_config_with_overrides.sh index 6b705a06..8269a326 100755 --- a/tests/test_config_with_overrides.sh +++ b/tests/test_config_with_overrides.sh @@ -21,7 +21,7 @@ clustername="cfgoverridetest" highlight "[START] Config With Override $EXTRA_TITLE" info "Creating cluster $clustername..." -$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent[1]" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE" +$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent:1" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE" info "Sleeping for 5 seconds to give the cluster enough time to get ready..." 
sleep 5 From edfd54c6d5ab8c45763c60559814fe9835458f39 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 30 Jun 2021 09:41:57 +0200 Subject: [PATCH 31/45] remove debug logs for merged kubeconfig as they add too much noise --- pkg/client/kubeconfig.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pkg/client/kubeconfig.go b/pkg/client/kubeconfig.go index a016f153..cd47b6ee 100644 --- a/pkg/client/kubeconfig.go +++ b/pkg/client/kubeconfig.go @@ -278,13 +278,6 @@ func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, ex existingKubeConfig.CurrentContext = newKubeConfig.CurrentContext } - kubeconfigYaml, err := clientcmd.Write(*existingKubeConfig) - if err != nil { - log.Debugf("Merged Kubeconfig:\n%+v", existingKubeConfig) - } else { - log.Tracef("Merged Kubeconfig:\n%s", kubeconfigYaml) - } - return KubeconfigWrite(ctx, existingKubeConfig, outPath) } From d41b49d4235a3d01a0710edf34afa10876901659 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:10:02 +0200 Subject: [PATCH 32/45] document using new nodefilter syntax with : instead of [] for indexing --- cmd/cluster/clusterCreate.go | 12 ++++++------ cmd/debug/debug.go | 2 +- docs/usage/commands/k3d_cluster_create.md | 10 +++++----- docs/usage/configfile.md | 10 +++++----- docs/usage/guides/exposing_services.md | 4 ++-- pkg/config/v1alpha3/migrations.go | 4 ++-- pkg/config/v1alpha3/schema.json | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cmd/cluster/clusterCreate.go b/cmd/cluster/clusterCreate.go index c238a736..543084d7 100644 --- a/cmd/cluster/clusterCreate.go +++ b/cmd/cluster/clusterCreate.go @@ -295,23 +295,23 @@ func NewCmdClusterCreate() *cobra.Command { cmd.Flags().String("api-port", "", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`") _ = ppViper.BindPFlag("cli.api-port", cmd.Flags().Lookup("api-port")) - 
cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com@server[0]\" -e \"SOME_KEY=SOME_VAL@server[0]\"`") + cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com@server:0\" -e \"SOME_KEY=SOME_VAL@server:0\"`") _ = ppViper.BindPFlag("cli.env", cmd.Flags().Lookup("env")) - cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`") + cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server:0`") _ = ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume")) - cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`") + cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent[1]`") _ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port")) - cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server[0]\"`") + 
cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent[0,1]\" --k3s-node-label \"other.label=somevalue@server:0\"`") _ = ppViper.BindPFlag("cli.k3s-node-labels", cmd.Flags().Lookup("k3s-node-label")) - cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent[0,1]\" --runtime-label \"other.label=somevalue@server[0]\"`") + cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent[0,1]\" --runtime-label \"other.label=somevalue@server:0\"`") _ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label")) /* k3s */ - cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server[0]\"") + cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server:0\"") _ = cfgViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg")) /****************** diff --git a/cmd/debug/debug.go b/cmd/debug/debug.go index c524790b..19a58b6a 100644 --- a/cmd/debug/debug.go +++ b/cmd/debug/debug.go @@ -67,7 +67,7 @@ func NewCmdDebugLoadbalancer() *cobra.Command { } cmd.AddCommand(&cobra.Command{ - Use: "get-config", + Use: "get-config CLUSTERNAME", Args: cobra.ExactArgs(1), // cluster name Run: func(cmd *cobra.Command, args []string) { c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]}) diff --git 
a/docs/usage/commands/k3d_cluster_create.md b/docs/usage/commands/k3d_cluster_create.md index f784462d..e0969db9 100644 --- a/docs/usage/commands/k3d_cluster_create.md +++ b/docs/usage/commands/k3d_cluster_create.md @@ -25,23 +25,23 @@ k3d cluster create NAME [flags] - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550` -c, --config string Path of a config file to use -e, --env KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add environment variables to nodes (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com@server[0]" -e "SOME_KEY=SOME_VAL@server[0]"` + - Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com@server:0" -e "SOME_KEY=SOME_VAL@server:0"` --gpus string GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker] -h, --help help for create -i, --image string Specify k3s image that you want to use for the nodes --k3s-arg ARG@NODEFILTER[;@NODEFILTER] Additional args passed to k3s command (Format: ARG@NODEFILTER[;@NODEFILTER]) - - Example: `k3d cluster create --k3s-arg "--disable=traefik@server[0]" + - Example: `k3d cluster create --k3s-arg "--disable=traefik@server:0" --kubeconfig-switch-context Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true) --kubeconfig-update-default Directly update the default kubeconfig with the new cluster's context (default true) -l, --label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to node container (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -l "other.label=somevalue@server[0]"` + - Example: `k3d cluster create --agents 2 -l "my.label@agent[0,1]" -l "other.label=somevalue@server:0"` --network string Join an existing network --no-hostip Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS 
--no-image-volume Disable the creation of a volume for importing images --no-lb Disable the creation of a LoadBalancer in front of the server nodes --no-rollback Disable the automatic rollback actions, if anything goes wrong -p, --port [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER] Map ports from the node containers to the host (Format: [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]) - - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]` + - Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent[1]` --registry-config string Specify path to an extra registries.yaml file --registry-create Create a k3d-managed registry and connect it to the cluster --registry-use stringArray Connect to one or more k3d-managed registries running locally @@ -51,7 +51,7 @@ k3d cluster create NAME [flags] --timeout duration Rollback changes if cluster couldn't be created in specified duration. --token string Specify a cluster token. By default, we generate one. -v, --volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]] Mount volumes into the nodes (Format: [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]] - - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]` + - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server:0` --wait Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever. 
(default true) ``` diff --git a/docs/usage/configfile.md b/docs/usage/configfile.md index 97fa45c0..4f5178fd 100644 --- a/docs/usage/configfile.md +++ b/docs/usage/configfile.md @@ -19,7 +19,7 @@ Using a config file is as easy as putting it in a well-known place in your file - All options in config file: `k3d cluster create --config /home/me/my-awesome-config.yaml` (must be `.yaml`/`.yml`) - With CLI override (name): `k3d cluster create somename --config /home/me/my-awesome-config.yaml` -- With CLI override (extra volume): `k3d cluster create --config /home/me/my-awesome-config.yaml --volume '/some/path:/some:path@server[0]'` +- With CLI override (extra volume): `k3d cluster create --config /home/me/my-awesome-config.yaml --volume '/some/path:/some:path@server:0'` ## Required Fields @@ -64,9 +64,9 @@ image: rancher/k3s:v1.20.4-k3s1 # same as `--image rancher/k3s:v1.20.4-k3s1` network: my-custom-net # same as `--network my-custom-net` token: superSecretToken # same as `--token superSecretToken` volumes: # repeatable flags are represented as YAML lists - - volume: /my/host/path:/path/in/node # same as `--volume '/my/host/path:/path/in/node@server[0];agent[*]'` + - volume: /my/host/path:/path/in/node # same as `--volume '/my/host/path:/path/in/node@server:0;agent[*]'` nodeFilters: - - server[0] + - server:0 - agent[*] ports: - port: 8080:80 # same as `--port '8080:80@loadbalancer'` @@ -77,9 +77,9 @@ labels: nodeFilters: - agent[1] env: - - envVar: bar=baz # same as `--env 'bar=baz@server[0]'` + - envVar: bar=baz # same as `--env 'bar=baz@server:0'` nodeFilters: - - server[0] + - server:0 registries: # define how registries should be created or used create: true # creates a default registry to be used with the cluster; same as `--registry-create` use: diff --git a/docs/usage/guides/exposing_services.md b/docs/usage/guides/exposing_services.md index 07b4cc7b..33b0d379 100644 --- a/docs/usage/guides/exposing_services.md +++ b/docs/usage/guides/exposing_services.md @@ 
-62,10 +62,10 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh 1. Create a cluster, mapping the port `30080` from `agent-0` to `localhost:8082` - `#!bash k3d cluster create mycluster -p "8082:30080@agent[0]" --agents 2` + `#!bash k3d cluster create mycluster -p "8082:30080@agent:0" --agents 2` - **Note 1**: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport) - - **Note 2**: You may as well expose the whole NodePort range from the very beginning, e.g. via `k3d cluster create mycluster --agents 3 -p "30000-32767:30000-32767@server[0]"` (See [this video from @portainer](https://www.youtube.com/watch?v=5HaU6338lAk)) + - **Note 2**: You may as well expose the whole NodePort range from the very beginning, e.g. via `k3d cluster create mycluster --agents 3 -p "30000-32767:30000-32767@server:0"` (See [this video from @portainer](https://www.youtube.com/watch?v=5HaU6338lAk)) - **Warning**: Docker creates iptable entries and a new proxy process per port-mapping, so this may take a very long time or even freeze your system! ... (Steps 2 and 3 like above) ... 
diff --git a/pkg/config/v1alpha3/migrations.go b/pkg/config/v1alpha3/migrations.go index fd1bc6dc..b1068de3 100644 --- a/pkg/config/v1alpha3/migrations.go +++ b/pkg/config/v1alpha3/migrations.go @@ -64,7 +64,7 @@ func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ Arg: arg, NodeFilters: []string{ - "server[*]", + "server:*", }, }) } @@ -73,7 +73,7 @@ func MigrateV1Alpha2(input configtypes.Config) (configtypes.Config, error) { cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, K3sArgWithNodeFilters{ Arg: arg, NodeFilters: []string{ - "agent[*]", + "agent:*", }, }) } diff --git a/pkg/config/v1alpha3/schema.json b/pkg/config/v1alpha3/schema.json index 1deb0060..f04bf00e 100644 --- a/pkg/config/v1alpha3/schema.json +++ b/pkg/config/v1alpha3/schema.json @@ -257,7 +257,7 @@ "examples": [ "loadbalancer", "server[*]", - "server[0]", + "server:0", "agent[1]", "all" ] From a8a643eed5fbcb50a2a41d571caa533ace5ebdaa Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:11:48 +0200 Subject: [PATCH 33/45] tests/e2e: add test for loadbalancer --- tests/test_loadbalancer.sh | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100755 tests/test_loadbalancer.sh diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh new file mode 100755 index 00000000..9fbde168 --- /dev/null +++ b/tests/test_loadbalancer.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } + +# shellcheck source=./common.sh +source "$CURR_DIR/common.sh" + +export CURRENT_STAGE="Test | Loadbalancer" + +highlight "[START] LoadbalancerTest $EXTRA_TITLE" + +function check_container_port() { + # $1 = container name + # $2 = wanted port + exists=$(docker inspect "$1" 
--format '{{ range $k, $_ := .NetworkSettings.Ports }}{{ if eq $k "'"$2"'" }}true{{ end }}{{ end }}') + if [[ $exists == "true" ]]; then + return 0 + else + docker inspect "$1" --format '{{ range $k, $_ := .NetworkSettings.Ports }}{{ printf "%s\n" $k }}{{ end }}' + return 1 + fi +} + +clustername="lbtest" + +info "Creating cluster $clustername..." +$EXE cluster create $clustername --timeout 360s --agents 1 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" + +info "Checking we have access to the cluster..." +check_clusters "$clustername" || failed "error checking cluster" + +info "Checking Container Ports..." +check_container_port k3d-$clustername-serverlb "6443/tcp" || failed "6443/tcp not on serverlb" +check_container_port k3d-$clustername-serverlb "80/tcp" || failed "80/tcp not on serverlb" +check_container_port k3d-$clustername-agent-0 "4321/tcp" || failed "4321/tcp not on agent-0" + +info "Checking Loadbalancer Config..." +$EXE debug loadbalancer get-config $clustername | grep -A1 "80.tcp" | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" + +info "Deleting clusters..." 
+$EXE cluster delete $clustername || failed "could not delete the cluster $clustername" + +exit 0 + + From fb1c45b9ae91ea5c94e53fb5b39e62de1aeda47a Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:13:18 +0200 Subject: [PATCH 34/45] client/cluster:make injecting the coredns entry for host.k3d.internal more robust --- pkg/client/cluster.go | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/pkg/client/cluster.go b/pkg/client/cluster.go index 6ebb82b8..b387d722 100644 --- a/pkg/client/cluster.go +++ b/pkg/client/cluster.go @@ -26,6 +26,7 @@ import ( _ "embed" "errors" "fmt" + "io/ioutil" "sort" "strconv" "time" @@ -511,7 +512,7 @@ ClusterCreatOpts: if err != nil { return fmt.Errorf("error generating loadbalancer config: %v", err) } - cluster.ServerLoadBalancer.Config = lbConfig + cluster.ServerLoadBalancer.Config = &lbConfig } cluster.ServerLoadBalancer.Node.RuntimeLabels = clusterCreateOpts.GlobalLabels @@ -956,9 +957,29 @@ func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.C hostRecordSuccessMessage += fmt.Sprintf("Successfully added host record to /etc/hosts in %d/%d nodes", (len(cluster.Nodes) - etcHostsFailureCount), len(cluster.Nodes)) } - patchCmd := `test=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$test"` - if err = runtime.ExecInNode(ctx, cluster.Nodes[0], []string{"sh", "-c", patchCmd}); err != nil { - log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err) + patchCmd := `patch=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": 
{"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$patch"` + successInjectCoreDNSEntry := false + for _, node := range cluster.Nodes { + + if node.Role == k3d.AgentRole || node.Role == k3d.ServerRole { + logreader, err := runtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", patchCmd}) + if err == nil { + successInjectCoreDNSEntry = true + break + } else { + msg := fmt.Sprintf("error patching the CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err) + readlogs, err := ioutil.ReadAll(logreader) + if err != nil { + log.Debugf("error reading the logs from failed CoreDNS patch exec process in node %s: %v", node.Name, err) + } else { + msg += fmt.Sprintf("\nLogs: %s", string(readlogs)) + } + log.Debugln(msg) + } + } + } + if successInjectCoreDNSEntry == false { + log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s' (see debug logs)", hostsEntry) } else { hostRecordSuccessMessage += " and to the CoreDNS ConfigMap" } From 8fef6aee09c729c5042f54a6cedf982675882e75 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:14:05 +0200 Subject: [PATCH 35/45] fix usage of the new loadbalancer type and ordering when cluster is created --- pkg/client/loadbalancer.go | 9 ++++++++- pkg/config/transform.go | 16 ++++++++++------ pkg/runtimes/docker/translate.go | 3 +-- pkg/types/loadbalancer.go | 11 ++++++++--- pkg/util/filter.go | 2 +- 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index dc37d5a6..fe1989b4 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -112,7 +112,8 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste var cfg k3d.LoadbalancerConfig - if cluster.ServerLoadBalancer == nil { + if cluster.ServerLoadBalancer == nil || cluster.ServerLoadBalancer.Node == nil { + cluster.ServerLoadBalancer = &k3d.Loadbalancer{} for _, node := range cluster.Nodes { if node.Role == 
types.LoadBalancerRole { var err error @@ -185,6 +186,12 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster } cluster.ServerLoadBalancer.Node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding} + if cluster.ServerLoadBalancer.Config == nil { + cluster.ServerLoadBalancer.Config = &k3d.LoadbalancerConfig{ + Ports: map[string][]string{}, + } + } + // Create LB as a modified node with loadbalancerRole lbNode := &k3d.Node{ Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name), diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 0d35677b..417869c8 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -61,6 +61,9 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim if simpleConfig.Network != "" { clusterNetwork.Name = simpleConfig.Network clusterNetwork.External = true + } else { + clusterNetwork.Name = fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, simpleConfig.Name) + clusterNetwork.External = false } if simpleConfig.Subnet != "" { @@ -109,6 +112,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim if err != nil { return nil, fmt.Errorf("error preparing the loadbalancer: %w", err) } + newCluster.Nodes = append(newCluster.Nodes, newCluster.ServerLoadBalancer.Node) } else { log.Debugln("Disabling the load balancer") } @@ -133,6 +137,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } newCluster.Nodes = append(newCluster.Nodes, &serverNode) + + newCluster.ServerLoadBalancer.Config.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = append(newCluster.ServerLoadBalancer.Config.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)], serverNode.Name) } for i := 0; i < simpleConfig.Agents; i++ { @@ -150,12 +156,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim ****************************/ // -> VOLUMES - nodeCount := simpleConfig.Servers + 
simpleConfig.Agents + nodeCount := len(newCluster.Nodes) nodeList := newCluster.Nodes - if !simpleConfig.Options.K3dOptions.DisableLoadbalancer { - nodeCount++ - nodeList = append(nodeList, newCluster.ServerLoadBalancer.Node) - } for _, volumeWithNodeFilters := range simpleConfig.Volumes { nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters) if err != nil { @@ -191,7 +193,9 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, err } for _, pm := range portmappings { - loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes) + if err := loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes); err != nil { + return nil, err + } } } else if suffix == "direct" { for _, node := range nodes { diff --git a/pkg/runtimes/docker/translate.go b/pkg/runtimes/docker/translate.go index 564b0f6f..672061bf 100644 --- a/pkg/runtimes/docker/translate.go +++ b/pkg/runtimes/docker/translate.go @@ -145,8 +145,7 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) { if len(node.Networks) > 0 { netInfo, err := GetNetwork(context.Background(), node.Networks[0]) // FIXME: only considering first network here, as that's the one k3d creates for a cluster if err != nil { - log.Warnln("Failed to get network information") - log.Warnln(err) + log.Warnf("Failed to get network information: %v", err) } else if netInfo.Driver == "host" { hostConfig.NetworkMode = "host" } diff --git a/pkg/types/loadbalancer.go b/pkg/types/loadbalancer.go index 158fe61b..caaba800 100644 --- a/pkg/types/loadbalancer.go +++ b/pkg/types/loadbalancer.go @@ -34,8 +34,8 @@ package types */ type Loadbalancer struct { - Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node - Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration + Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node + Config *LoadbalancerConfig `mapstructure:"config" yaml:"config"` 
// its configuration } func NewLoadbalancer() *Loadbalancer { @@ -44,7 +44,12 @@ func NewLoadbalancer() *Loadbalancer { Role: LoadBalancerRole, Image: GetLoadbalancerImage(), }, - Config: LoadbalancerConfig{Ports: map[string][]string{}}, + Config: &LoadbalancerConfig{ + Ports: map[string][]string{}, + Settings: LoadBalancerSettings{ + WorkerProcesses: DefaultLoadbalancerWorkerProcesses, + }, + }, } } diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 3ab76ea8..7df75314 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -37,7 +37,7 @@ const ( ) // Regexp pattern to match node filters -var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:[[:alpha:]]+)?$`) +var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:(?P[[:alpha:]]+))?$`) // FilterNodesBySuffix properly interprets NodeFilters with suffix func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string][]*k3d.Node, error) { From 6f1f58a2c2eb5254348675cca1167df91f773735 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:26:50 +0200 Subject: [PATCH 36/45] fix missing merge conflict resolutions --- go.mod | 1 + go.sum | 1 + pkg/client/node.go | 7 ------- vendor/modules.txt | 3 +++ 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index b0d20808..6893fcd8 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( github.com/spf13/viper v1.8.1 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.0 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect golang.org/x/text v0.3.6 // indirect gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 6f0cbba9..879f84b4 100644 --- a/go.sum +++ b/go.sum @@ -686,6 +686,7 @@ golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/pkg/client/node.go b/pkg/client/node.go index 21946798..aae7bc46 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -34,10 +34,7 @@ import ( "time" copystruct "github.com/mitchellh/copystructure" -<<<<<<< HEAD "gopkg.in/yaml.v2" -======= ->>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) "github.com/docker/go-connections/nat" dockerunits "github.com/docker/go-units" @@ -741,11 +738,7 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No // start new node log.Infof("Starting new node %s...", new.Name) -<<<<<<< HEAD if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil { -======= - if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true}); err != nil { ->>>>>>> 9a18c3a3 ([FEATURE] add ability to add ports to an existing loadbalancer (#615)) if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil { return fmt.Errorf("Failed to start new node. 
Also failed to rollback: %+v", err) } diff --git a/vendor/modules.txt b/vendor/modules.txt index d499b5ef..54f233ab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -233,6 +233,9 @@ golang.org/x/net/proxy # golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 golang.org/x/oauth2 golang.org/x/oauth2/internal +# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +## explicit +golang.org/x/sync/errgroup # golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 ## explicit golang.org/x/sys/execabs From 02ebc23efe54f4360c3b6dc7bee2e7dda05129e7 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 13:34:56 +0200 Subject: [PATCH 37/45] add forgotten dependency --- vendor/golang.org/x/sync/AUTHORS | 3 + vendor/golang.org/x/sync/CONTRIBUTORS | 3 + vendor/golang.org/x/sync/LICENSE | 27 ++++++++ vendor/golang.org/x/sync/PATENTS | 22 +++++++ vendor/golang.org/x/sync/errgroup/errgroup.go | 66 +++++++++++++++++++ 5 files changed, 121 insertions(+) create mode 100644 vendor/golang.org/x/sync/AUTHORS create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. 
+# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 00000000..9857fe53 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "context" + "sync" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. 
+func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} From 95ecaf77e0a1e54d19141e27d992746b17a28d33 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 14:01:47 +0200 Subject: [PATCH 38/45] fix nilpointer exception when cluster loadbalancer is not specified --- pkg/client/node.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/client/node.go b/pkg/client/node.go index aae7bc46..09e83cf0 100644 --- a/pkg/client/node.go +++ b/pkg/client/node.go @@ -681,6 +681,9 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang if err != nil { return fmt.Errorf("error updating loadbalancer config: %w", err) } + if cluster.ServerLoadBalancer == nil { + cluster.ServerLoadBalancer = k3d.NewLoadbalancer() + } cluster.ServerLoadBalancer.Node = result lbConfig, err := LoadbalancerGenerateConfig(cluster) if err != nil { From e8932d7287af92dd9fa09589823f19e51e179fac Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 14:25:51 +0200 Subject: [PATCH 39/45] test/e2e: loadbalancer test case no suffix --- tests/test_loadbalancer.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh index 9fbde168..664c386f 100755 --- a/tests/test_loadbalancer.sh +++ b/tests/test_loadbalancer.sh @@ -25,16 +25,26 @@ function check_container_port() { clustername="lbtest" info "Creating cluster $clustername..." -$EXE cluster create $clustername --timeout 360s --agents 1 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --agents 1 -p 2222:3333@server:0 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." 
check_clusters "$clustername" || failed "error checking cluster" info "Checking Container Ports..." + +info "> Checking automatic port mapping for Kube API on loadbalancer (6443)..." check_container_port k3d-$clustername-serverlb "6443/tcp" || failed "6443/tcp not on serverlb" + +info "> Checking explicit proxy port mapping of port 80 -> loadbalancer -> server-0" check_container_port k3d-$clustername-serverlb "80/tcp" || failed "80/tcp not on serverlb" + +info "> Checking explicit direct port mapping of port 4321 -> agent-0" check_container_port k3d-$clustername-agent-0 "4321/tcp" || failed "4321/tcp not on agent-0" +info "> Checking implicit proxy port mapping of port 3333 -> loadbalancer -> server-0" +check_container_port k3d-$clustername-server-0 "3333/tcp" && failed "3333/tcp on server-0 but should be on serverlb" +check_container_port k3d-$clustername-serverlb "3333/tcp" || failed "3333/tcp not on serverlb" + info "Checking Loadbalancer Config..." $EXE debug loadbalancer get-config $clustername | grep -A1 "80.tcp" | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" From abc53113c3e6e919bf06cf0dd635b7b3c6bbe7ca Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Tue, 6 Jul 2021 14:41:18 +0200 Subject: [PATCH 40/45] changelog: add some notes about the new nodefilters and port-mapping behavior --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index efd64333..d1716142 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ ## v5.0.0 +### Breaking Changes + +- new syntax for nodefilters + - dropped the usage of square brackets `[]` for indexing, as it caused problems with some shells trying to interpret them + - new syntax: `@identifier[:index][:opt]` (see ) + - example for a port-mapping: `--port 8080:80@server:0:proxy` + - identifier = `server`, index = `0`, opt = `proxy` + - `opt` is an extra optional argument used for different purposes depending on the flag + - 
currently, only the `--port` flag has `opt`s, namely `proxy` and `direct` (see other breaking change) + +- + ### Fixes - cleaned up and properly sorted the sanitization of existing resources used to create new nodes (#638) From 1944c06dad4ea89ccbf755311dcbbb6ea34b26fa Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 09:18:54 +0200 Subject: [PATCH 41/45] properly handle --port 1234:4321@loadbalancer:proxy style port mappings which should default to all nodes as upstream --- CHANGELOG.md | 5 +++-- Dockerfile | 2 +- pkg/config/transform.go | 21 +++++++++++++++++-- pkg/util/filter.go | 41 +++++++++++++++++++++++++++----------- tests/test_loadbalancer.sh | 17 ++++++++++++++-- 5 files changed, 67 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d1716142..07ab6ff9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,9 @@ - identifier = `server`, index = `0`, opt = `proxy` - `opt` is an extra optional argument used for different purposes depending on the flag - currently, only the `--port` flag has `opt`s, namely `proxy` and `direct` (see other breaking change) - -- +- port-mapping now go via the loadbalancer (serverlb) by default + - the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default + - to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag ### Fixes diff --git a/Dockerfile b/Dockerfile index 1aa1b056..df9d5021 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ COPY . . 
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version FROM docker:20.10-dind as dind -RUN apk update && apk add bash curl sudo jq git make netcat-openbsd +RUN apk update && apk add bash curl sudo jq yq git make netcat-openbsd RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \ chmod +x ./kubectl && \ mv ./kubectl /usr/local/bin/kubectl diff --git a/pkg/config/transform.go b/pkg/config/transform.go index 417869c8..cc812374 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -175,16 +175,28 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port) } - x, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) + filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) if err != nil { return nil, err } - for suffix, nodes := range x { + for suffix, nodes := range filteredNodes { portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) if err != nil { return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) } + + for _, n := range nodes { + if n.Role == k3d.LoadBalancerRole && n.Name == newCluster.ServerLoadBalancer.Node.Name { + log.Infoln("loadbalancer in filtered list for port mappings: defaulting to all servers and agents as upstream targets") + var err error + nodes, err = util.FilterNodes(newCluster.Nodes, []string{"agents:*", "servers:*"}) + if err != nil { + return nil, err + } + } + } + if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings if newCluster.ServerLoadBalancer == nil { return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") @@ -203,6 +215,8 @@ func 
TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, err } } + } else if suffix != util.NodeFilterMapKeyAll { + return nil, fmt.Errorf("error adding port mappings: unknown suffix %s", suffix) } } @@ -396,6 +410,9 @@ func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMappi portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto()) nodenames := []string{} for _, node := range nodes { + if node.Role == k3d.LoadBalancerRole { + return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)") + } nodenames = append(nodenames, node.Name) } diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 7df75314..7ddb04ba 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -36,6 +36,16 @@ const ( NodeFilterMapKeyAll = "all" ) +var ( + rolesByIdentifier = map[string]k3d.Role{ + "server": k3d.ServerRole, + "servers": k3d.ServerRole, + "agent": k3d.AgentRole, + "agents": k3d.AgentRole, + "loadbalancer": k3d.LoadBalancerRole, + } +) + // Regexp pattern to match node filters var NodeFilterRegexp = regexp.MustCompile(`^(?Pserver|servers|agent|agents|loadbalancer|all)(?P:(?P(?P(\d+,?)+)|(?P\d*-\d*)|(?P\*)))?(?P:(?P[[:alpha:]]+))?$`) @@ -74,7 +84,7 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string] return nil, err } - log.Tracef("Adding %d nodes for suffix >%s< (filter: %s)", len(filteredNodes), suffix, nf) + log.Tracef("Filtered %d nodes for suffix '%s' (filter: %s)", len(filteredNodes), suffix, nf) result[suffix] = filteredNodes } @@ -132,16 +142,21 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { // Choose the group of nodes to operate on groupNodes := []*k3d.Node{} - if submatches["group"] == string(k3d.ServerRole) { - groupNodes = serverNodes - } else if submatches["group"] == string(k3d.AgentRole) { - groupNodes = agentNodes - } else if submatches["group"] == 
string(k3d.LoadBalancerRole) { - if serverlb == nil { - return nil, fmt.Errorf("Node filter '%s' targets a node that does not exist (disabled?)", filter) + if role, ok := rolesByIdentifier[submatches["group"]]; ok { + switch role { + case k3d.ServerRole: + groupNodes = serverNodes + break + case k3d.AgentRole: + groupNodes = agentNodes + break + case k3d.LoadBalancerRole: + if serverlb == nil { + return nil, fmt.Errorf("Node filter '%s' targets a node that does not exist (disabled?)", filter) + } + filteredNodes = append(filteredNodes, serverlb) + return filteredNodes, nil // early exit if filtered group is the loadbalancer } - filteredNodes = append(filteredNodes, serverlb) - return filteredNodes, nil // early exit if filtered group is the loadbalancer } /* Option 1) subset defined by list */ @@ -166,10 +181,10 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { } else if submatches["subsetRange"] != "" { /* - * subset specified by a range 'START:END', where each side is optional + * subset specified by a range 'START-END', where each side is optional */ - split := strings.Split(submatches["subsetRange"], ":") + split := strings.Split(submatches["subsetRange"], "-") if len(split) != 2 { return nil, fmt.Errorf("Failed to parse subset range in '%s'", filter) } @@ -226,6 +241,8 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) { } + log.Tracef("Filtered %d nodes (filter: %s)", len(filteredNodes), filters) + return filteredNodes, nil } diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh index 664c386f..e4f8327e 100755 --- a/tests/test_loadbalancer.sh +++ b/tests/test_loadbalancer.sh @@ -25,7 +25,12 @@ function check_container_port() { clustername="lbtest" info "Creating cluster $clustername..." 
-$EXE cluster create $clustername --timeout 360s --agents 1 -p 2222:3333@server:0 -p 8080:80@server:0:proxy -p 1234:4321/tcp@agent:0:direct || failed "could not create cluster $clustername" +$EXE cluster create $clustername --timeout 360s --agents 1 \ + -p 2222:3333@server:0 \ + -p 8080:80@server:0:proxy \ + -p 1234:4321/tcp@agent:0:direct \ + -p 4444:5555@loadbalancer:0:proxy \ + || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." check_clusters "$clustername" || failed "error checking cluster" @@ -45,8 +50,16 @@ info "> Checking implicit proxy port mapping of port 3333 -> loadbalancer -> ser check_container_port k3d-$clustername-server-0 "3333/tcp" && failed "3333/tcp on server-0 but should be on serverlb" check_container_port k3d-$clustername-serverlb "3333/tcp" || failed "3333/tcp not on serverlb" +info "> Checking implicit proxy port mapping of port 5555 -> loadbalancer -> server-0 & agent-0" +check_container_port k3d-$clustername-server-0 "5555/tcp" && failed "5555/tcp on server-0 but should be on serverlb" +check_container_port k3d-$clustername-agent-0 "5555/tcp" && failed "5555/tcp on agent-0 but should be on serverlb" +check_container_port k3d-$clustername-serverlb "5555/tcp" || failed "5555/tcp not on serverlb" + info "Checking Loadbalancer Config..." 
-$EXE debug loadbalancer get-config $clustername | grep -A1 "80.tcp" | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" +$EXE debug loadbalancer get-config $clustername | yq read - 'ports."80.tcp"' | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" +$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-server-0" || failed "port 5555.tcp not configured for server-0" +$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-agent-0" || failed "port 5555.tcp not configured for agent-0" + info "Deleting clusters..." $EXE cluster delete $clustername || failed "could not delete the cluster $clustername" From 924fdfe586277bed01207c2e20a2974a798db702 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:16:46 +0200 Subject: [PATCH 42/45] tests/e2e: check more cases for port-mappings that should go via the loadbalancer --- tests/test_loadbalancer.sh | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/test_loadbalancer.sh b/tests/test_loadbalancer.sh index e4f8327e..c9d17045 100755 --- a/tests/test_loadbalancer.sh +++ b/tests/test_loadbalancer.sh @@ -30,6 +30,7 @@ $EXE cluster create $clustername --timeout 360s --agents 1 \ -p 8080:80@server:0:proxy \ -p 1234:4321/tcp@agent:0:direct \ -p 4444:5555@loadbalancer:0:proxy \ + -p 6666:7777 \ || failed "could not create cluster $clustername" info "Checking we have access to the cluster..." 
@@ -55,10 +56,18 @@ check_container_port k3d-$clustername-server-0 "5555/tcp" && failed "5555/tcp on check_container_port k3d-$clustername-agent-0 "5555/tcp" && failed "5555/tcp on agent-0 but should be on serverlb" check_container_port k3d-$clustername-serverlb "5555/tcp" || failed "5555/tcp not on serverlb" +info "> Checking implicit proxy port mapping of port 7777 -> loadbalancer -> server-0 & agent-0" +check_container_port k3d-$clustername-server-0 "7777/tcp" && failed "7777/tcp on server-0 but should be on serverlb" +check_container_port k3d-$clustername-agent-0 "7777/tcp" && failed "7777/tcp on agent-0 but should be on serverlb" +check_container_port k3d-$clustername-serverlb "7777/tcp" || failed "7777/tcp not on serverlb" + info "Checking Loadbalancer Config..." -$EXE debug loadbalancer get-config $clustername | yq read - 'ports."80.tcp"' | grep "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" -$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-server-0" || failed "port 5555.tcp not configured for server-0" -$EXE debug loadbalancer get-config $clustername | yq read - 'ports."5555.tcp"' | grep "k3d-$clustername-agent-0" || failed "port 5555.tcp not configured for agent-0" +LOG_LEVEL=error $EXE debug loadbalancer get-config $clustername > lbconfig.yaml +yq eval '.ports."80.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-server-0" || failed "port 80.tcp not configured for server-0" +yq eval '.ports."5555.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-server-0" || failed "port 5555.tcp not configured for server-0" +yq eval '.ports."5555.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-agent-0" || failed "port 5555.tcp not configured for agent-0" +yq eval '.ports."7777.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-server-0" || failed "port 7777.tcp not configured for server-0" +yq eval '.ports."7777.tcp"' lbconfig.yaml | grep -q "k3d-$clustername-agent-0" || failed "port 
7777.tcp not configured for agent-0" info "Deleting clusters..." From a2787250788ce70057687c3e0aca816788aa94ce Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:17:48 +0200 Subject: [PATCH 43/45] tests/e2e: quiet grep output to pollute the terminal a little less --- tests/common.sh | 6 +++--- tests/dind.sh | 2 +- tests/test_basic.sh | 2 +- tests/test_config_file.sh | 6 +++--- tests/test_config_with_overrides.sh | 8 ++++---- tests/test_node_edit.sh | 4 ++-- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/common.sh b/tests/common.sh index 4d4de061..57778213 100755 --- a/tests/common.sh +++ b/tests/common.sh @@ -117,7 +117,7 @@ check_volume_exists() { check_cluster_token_exist() { [ -n "$EXE" ] || abort "EXE is not defined" - $EXE cluster get "$1" --token | grep "TOKEN" >/dev/null 2>&1 + $EXE cluster get "$1" --token | grep -q "TOKEN" >/dev/null 2>&1 } wait_for_pod_running_by_label() { @@ -174,11 +174,11 @@ exec_in_node() { docker_assert_container_label() { # $1 = container/node name # $2 = label to assert - docker inspect --format '{{ range $k, $v := .Config.Labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' "$1" | grep -E "^$2$" + docker inspect --format '{{ range $k, $v := .Config.Labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' "$1" | grep -qE "^$2$" } k3s_assert_node_label() { # $1 = node name # $2 = label to assert - kubectl get node "$1" --output go-template='{{ range $k, $v := .metadata.labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' | grep -E "^$2$" + kubectl get node "$1" --output go-template='{{ range $k, $v := .metadata.labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}' | grep -qE "^$2$" } \ No newline at end of file diff --git a/tests/dind.sh b/tests/dind.sh index edede4a7..5432ac25 100755 --- a/tests/dind.sh +++ b/tests/dind.sh @@ -38,7 +38,7 @@ trap finish EXIT # wait for the runner container to be up or exit early TIMEOUT=0 -until docker inspect "$k3de2e" | jq ".[0].State.Running" && docker logs "$k3de2e" 2>&1 | grep -i 
"API listen on /var/run/docker.sock"; do +until docker inspect "$k3de2e" | jq ".[0].State.Running" && docker logs "$k3de2e" 2>&1 | grep -qi "API listen on /var/run/docker.sock"; do if [[ $TIMEOUT -eq $RUNNER_START_TIMEOUT ]]; then echo "Failed to start E2E Runner Container in $RUNNER_START_TIMEOUT seconds" exit 1 diff --git a/tests/test_basic.sh b/tests/test_basic.sh index 8969c410..1c934b5f 100755 --- a/tests/test_basic.sh +++ b/tests/test_basic.sh @@ -19,7 +19,7 @@ info "Checking we have access to both clusters..." check_clusters "c1" "c2" || failed "error checking cluster" info "Checking cluster env var with escaped @ signs..." -docker exec k3d-c1-server-0 env | grep -E '^TEST_VAR=user@pass\\$' || failed "Failed to lookup proper env var in container" +docker exec k3d-c1-server-0 env | grep -qE '^TEST_VAR=user@pass\\$' || failed "Failed to lookup proper env var in container" info "Check k3s token retrieval" check_cluster_token_exist "c1" || failed "could not find cluster token c1" diff --git a/tests/test_config_file.sh b/tests/test_config_file.sh index 8793eb44..f10b988d 100755 --- a/tests/test_config_file.sh +++ b/tests/test_config_file.sh @@ -39,7 +39,7 @@ check_multi_node "$clustername" 5 || failed "failed to verify number of nodes" ## Environment Variables info "Ensuring that environment variables are present in the node containers as set in the config (with comma)" -exec_in_node "k3d-$clustername-server-0" "env" | grep "bar=baz,bob" || failed "Expected env var 'bar=baz,bob' is not present in node k3d-$clustername-server-0" +exec_in_node "k3d-$clustername-server-0" "env" | grep -q "bar=baz,bob" || failed "Expected env var 'bar=baz,bob' is not present in node k3d-$clustername-server-0" ## Container Labels info "Ensuring that container labels have been set as stated in the config" @@ -55,8 +55,8 @@ $EXE node list "k3d-$clustername-registry" || failed "Expected k3d-$clustername- ## merged registries.yaml info "Ensuring, that the registries.yaml file contains 
both registries" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "k3d-$clustername-registry" || failed "Expected 'k3d-$clustername-registry' to be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "k3d-$clustername-registry" || failed "Expected 'k3d-$clustername-registry' to be in the /etc/rancher/k3s/registries.yaml" # Cleanup diff --git a/tests/test_config_with_overrides.sh b/tests/test_config_with_overrides.sh index 8269a326..a533c036 100755 --- a/tests/test_config_with_overrides.sh +++ b/tests/test_config_with_overrides.sh @@ -37,8 +37,8 @@ check_multi_node "$clustername" 6 || failed "failed to verify number of nodes" ## Environment Variables info "Ensuring that environment variables are present in the node containers as set in the config and overrides" -exec_in_node "k3d-$clustername-server-0" "env" | grep "bar=baz" || failed "Expected env var 'bar=baz' is not present in node k3d-$clustername-server-0" -exec_in_node "k3d-$clustername-agent-1" "env" | grep "x=y" || failed "Expected env var 'x=y' is not present in node k3d-$clustername-agent-1" +exec_in_node "k3d-$clustername-server-0" "env" | grep -q "bar=baz" || failed "Expected env var 'bar=baz' is not present in node k3d-$clustername-server-0" +exec_in_node "k3d-$clustername-agent-1" "env" | grep -q "x=y" || failed "Expected env var 'x=y' is not present in node k3d-$clustername-agent-1" ## Container Labels info "Ensuring that container labels have been set as stated in the config" @@ 
-54,8 +54,8 @@ $EXE node list "k3d-$clustername-registry" && failed "Expected k3d-$clustername- ## merged registries.yaml info "Ensuring, that the registries.yaml file contains both registries" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" -exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "k3d-$clustername-registry" && failed "Expected 'k3d-$clustername-registry' to NOT be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" +exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "k3d-$clustername-registry" && failed "Expected 'k3d-$clustername-registry' to NOT be in the /etc/rancher/k3s/registries.yaml" # Cleanup diff --git a/tests/test_node_edit.sh b/tests/test_node_edit.sh index 80f896d3..2c6f7f2c 100755 --- a/tests/test_node_edit.sh +++ b/tests/test_node_edit.sh @@ -27,8 +27,8 @@ info "Adding port-mapping to loadbalancer..." $EXE node edit k3d-$clustername-serverlb --port-add $existingPortMappingHostPort:$existingPortMappingContainerPort --port-add $newPortMappingHostPort:$newPortMappingContainerPort || failed "failed to add port-mapping to serverlb in $clustername" info "Checking port-mappings..." 
-docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$existingPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" -docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -E "^$newPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -qE "^$existingPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" +docker inspect k3d-$clustername-serverlb --format '{{ range $k, $v := .NetworkSettings.Ports }}{{ printf "%s->%s\n" $k $v }}{{ end }}' | grep -qE "^$newPortMappingContainerPort" || failed "failed to verify pre-existing port-mapping" info "Checking cluster access..." check_clusters "$clustername" || failed "error checking cluster access" From 2e59b45ea9fbb21f14ce5401bcdd1571b444e174 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:18:29 +0200 Subject: [PATCH 44/45] dockerfile: include a specific yq version that doesn't have issues with control characters when checking the lb config --- Dockerfile | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index df9d5021..dcb93e87 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,37 @@ +############################################################ +# builder # +# -> golang image used solely for building the k3d binary # +# -> built executable can then be copied into other stages # +############################################################ FROM golang:1.16 as builder ARG GIT_TAG_OVERRIDE WORKDIR /app COPY . . 
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version +####################################################### +# dind # +# -> k3d + some tools in a docker-in-docker container # +# -> used e.g. in our CI pipelines for testing # +####################################################### FROM docker:20.10-dind as dind -RUN apk update && apk add bash curl sudo jq yq git make netcat-openbsd -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \ - chmod +x ./kubectl && \ - mv ./kubectl /usr/local/bin/kubectl + +# install some basic packages needed for testing, etc. +RUN apk update && apk add bash curl sudo jq git make netcat-openbsd + +# install kubectl to interact with the k3d cluster +RUN curl -L https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && \ + chmod +x /usr/local/bin/kubectl + +# install yq (yaml processor) from source, as the busybox yq had some issues +RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_linux_amd64 -o /usr/bin/yq &&\ + chmod +x /usr/bin/yq COPY --from=builder /app/bin/k3d /bin/k3d +######################################### +# binary-only # +# -> only the k3d binary.. 
nothing else # +######################################### FROM scratch as binary-only COPY --from=builder /app/bin/k3d /bin/k3d ENTRYPOINT ["/bin/k3d"] \ No newline at end of file From 4132757f3461db5edf20958e0c628d1fc14ea251 Mon Sep 17 00:00:00 2001 From: iwilltry42 Date: Wed, 7 Jul 2021 15:19:14 +0200 Subject: [PATCH 45/45] ports: no nodefilter or loadbalancer nodefilter should default to use all servers & agents as upstream --- CHANGELOG.md | 1 + pkg/client/loadbalancer.go | 2 +- pkg/config/transform.go | 43 +++++++++++++++++++++++++++----------- pkg/util/filter.go | 6 ++++-- 4 files changed, 37 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07ab6ff9..54afe14e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - port-mapping now go via the loadbalancer (serverlb) by default - the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default - to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag + - the nodefilter `loadbalancer` will now do the same as `servers:*;agents:*` (proxied via the loadbalancer) ### Fixes diff --git a/pkg/client/loadbalancer.go b/pkg/client/loadbalancer.go index fe1989b4..c6bec707 100644 --- a/pkg/client/loadbalancer.go +++ b/pkg/client/loadbalancer.go @@ -139,7 +139,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste file = bytes.Trim(file[512:], "\x00") // trim control characters, etc. 
if err := yaml.Unmarshal(file, &cfg); err != nil { - return cfg, err + return cfg, fmt.Errorf("error unmarshalling loadbalancer config: %w", err) } return cfg, nil diff --git a/pkg/config/transform.go b/pkg/config/transform.go index cc812374..b1850d14 100644 --- a/pkg/config/transform.go +++ b/pkg/config/transform.go @@ -44,6 +44,10 @@ import ( log "github.com/sirupsen/logrus" ) +var ( + DefaultTargetsNodefiltersPortMappings = []string{"servers:*:proxy", "agents:*:proxy"} +) + // TransformSimpleToClusterConfig transforms a simple configuration to a full-fledged cluster configuration func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime, simpleConfig conf.SimpleConfig) (*conf.ClusterConfig, error) { @@ -171,8 +175,18 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim // -> PORTS for _, portWithNodeFilters := range simpleConfig.Ports { + log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters) if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { - return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port) + log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + } + + for _, f := range portWithNodeFilters.NodeFilters { + if strings.HasPrefix(f, "loadbalancer") { + log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, DefaultTargetsNodefiltersPortMappings) + portWithNodeFilters.NodeFilters = DefaultTargetsNodefiltersPortMappings + break + } } filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters) @@ -180,23 +194,18 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim return nil, 
err } + nn := "" + for _, n := range filteredNodes["proxy"] { + nn = strings.Join([]string{nn, n.Name}, ",") + } + log.Debugf("Filtered nodes: %#v", nn) + for suffix, nodes := range filteredNodes { portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port) if err != nil { return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err) } - for _, n := range nodes { - if n.Role == k3d.LoadBalancerRole && n.Name == newCluster.ServerLoadBalancer.Node.Name { - log.Infoln("loadbalancer in filtered list for port mappings: defaulting to all servers and agents as upstream targets") - var err error - nodes, err = util.FilterNodes(newCluster.Nodes, []string{"agents:*", "servers:*"}) - if err != nil { - return nil, err - } - } - } - if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings if newCluster.ServerLoadBalancer == nil { return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled") @@ -222,6 +231,16 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim } + // print generated loadbalancer config + if log.GetLevel() >= log.DebugLevel { + yamlized, err := yaml.Marshal(newCluster.ServerLoadBalancer.Config) + if err != nil { + log.Errorf("error printing loadbalancer config: %v", err) + } else { + log.Debugf("generated loadbalancer config:\n%s", string(yamlized)) + } + } + // -> K3S NODE LABELS for _, k3sNodeLabelWithNodeFilters := range simpleConfig.Options.K3sOptions.NodeLabels { if len(k3sNodeLabelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 { diff --git a/pkg/util/filter.go b/pkg/util/filter.go index 7ddb04ba..1f9e5d70 100644 --- a/pkg/util/filter.go +++ b/pkg/util/filter.go @@ -77,7 +77,9 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string] suffix = sf } - result[suffix] = make([]*k3d.Node, 0) // init map for this suffix + if _, ok := result[suffix]; !ok { + result[suffix] = 
make([]*k3d.Node, 0) // init map for this suffix, if not exists + } filteredNodes, err := FilterNodes(nodes, []string{nf}) if err != nil { @@ -86,7 +88,7 @@ func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string] log.Tracef("Filtered %d nodes for suffix '%s' (filter: %s)", len(filteredNodes), suffix, nf) - result[suffix] = filteredNodes + result[suffix] = append(result[suffix], filteredNodes...) } return result, nil