Compare commits

3 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| iwilltry42 | 695259c988 | fix naming of dind image | 2021-08-18 09:30:53 +02:00 |
| iwilltry42 | 6497ef6af0 | ci/drone: multiarch images for everything + auto_tagged semver manifests/images | 2021-08-17 08:57:49 +02:00 |
| iwilltry42 | 1af92391b6 | ci/drone: push additional manifest for latest tag | 2021-08-16 15:21:23 +02:00 |
928 changed files with 26708 additions and 47957 deletions

@ -121,24 +121,6 @@
"contributions": [
"doc"
]
},
{
"login": "Shanduur",
"name": "Mateusz Urbanek",
"avatar_url": "https://avatars.githubusercontent.com/u/32583062?v=4",
"profile": "http://shanduur.github.io",
"contributions": [
"code"
]
},
{
"login": "benjaminjb",
"name": "Benjamin Blattberg",
"avatar_url": "https://avatars.githubusercontent.com/u/4651855?v=4",
"profile": "https://github.com/benjaminjb",
"contributions": [
"code"
]
}
],
"contributorsPerLine": 7,

@ -14,7 +14,7 @@ platform:
steps:
- name: lint
image: golang:1.17
image: golang:1.16
commands:
- make ci-setup
- make check-fmt lint
@ -40,7 +40,7 @@ steps:
- tag
- name: build
image: golang:1.17
image: golang:1.16
environment:
GIT_TAG: "${DRONE_TAG}"
commands:
@ -73,7 +73,6 @@ steps:
- tag
ref:
include:
# include only pre-release tags
- "refs/tags/*rc*"
- "refs/tags/*beta*"
- "refs/tags/*alpha*"
@ -98,7 +97,6 @@ steps:
- tag
ref:
exclude:
# exclude pre-release tags
- "refs/tags/*rc*"
- "refs/tags/*beta*"
- "refs/tags/*alpha*"
@ -118,6 +116,51 @@ volumes:
- name: dockersock
temp: {}
---
#########################
##### Documentation #####
#########################
kind: pipeline
type: docker
name: docs
platform:
os: linux
arch: amd64
steps:
- name: build
image: python:3.9
commands:
- python3 -m pip install -r docs/requirements.txt
- mkdocs build --verbose --clean --strict
when:
branch:
- main
event:
- push
- name: publish
image: plugins/gh-pages
settings:
password:
from_secret: github_token
username: rancherio-gh-m
pages_directory: site/
target_branch: gh-pages
when:
branch:
- main
event:
- push
trigger:
event:
- push
branch:
- main
---
###########################
@ -173,7 +216,7 @@ steps:
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: dind-linux-amd64
auto_tag_suffix: linux-amd64
dockerfile: Dockerfile
target: dind
context: .
@ -234,6 +277,42 @@ platform:
arch: arm
steps:
- name: build_push_binary
environment:
DOCKER_BUILDKIT: "1"
image: plugins/docker
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: linux-arm
dockerfile: Dockerfile
target: binary-only
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}
- name: build_push_dind
image: plugins/docker
environment:
DOCKER_BUILDKIT: "1"
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: linux-arm64
dockerfile: Dockerfile
target: dind
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}
- ARCH=arm
- name: build_push_proxy
image: plugins/docker
@ -312,7 +391,7 @@ steps:
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: dind-linux-arm64
auto_tag_suffix: linux-arm64
dockerfile: Dockerfile
target: dind
context: .
@ -383,7 +462,7 @@ steps:
from_secret: docker_password
spec: manifest.tmpl
auto_tag: true
ignore_missing: true # expected, as we dropped arm due to missing base image for that arch
ignore_missing: false
- name: push_manifest_dind
image: plugins/manifest
@ -394,7 +473,7 @@ steps:
from_secret: docker_password
spec: dind-manifest.tmpl
auto_tag: true
ignore_missing: true # expected, as we dropped arm due to missing base image for that arch
ignore_missing: false
- name: push_manifest_proxy
image: plugins/manifest

@ -1,46 +0,0 @@
name: k3d.io
on:
push:
branches:
- main
tags:
# only run on tags for real releases and special docs releases
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-docs.[0-9]+'
# tags-ignore:
# - "*rc*"
# - "*beta*"
# - "*alpha*"
# - "*test*"
# - "*dev*"
jobs:
build:
runs-on: ubuntu-20.04
container:
image: python:3.9
steps:
- name: Checkout Project
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Requirements
run: pip install -r docs/requirements.txt
- name: Build with MkDocs (validation)
run: |
mkdocs build --verbose --clean --strict
rm -r site/
- name: Configure Git
if: startsWith(github.ref, 'refs/tags/')
id: git
run: |
git config --global user.name ghaction-k3d.io
git config --global user.email ghaction@k3d.io
echo ::set-output name=tag::${GITHUB_REF#refs/tags/}
- name: Build & Deploy with Mike (versioned)
if: startsWith(github.ref, 'refs/tags/')
run: |
mike deploy --update-aliases --push --rebase ${{ steps.git.outputs.tag }} stable

@ -1,61 +1,7 @@
# Changelog
## v5.0.3
### Enhancements & Fixes
- simplified way of getting a Docker API Client that works with Docker Contexts and `DOCKER_*` environment variable configuration (#829, @dragonflylee)
- fix: didn't honor `DOCKER_TLS` environment variables before
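For orientation, honoring `DOCKER_HOST`/`DOCKER_TLS*`-style environment variables with the Docker Go SDK looks roughly like the sketch below. This illustrates only the underlying SDK calls (it is not the actual k3d code), and Docker Context resolution, which goes through the Docker CLI config, is left out.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// client.FromEnv honors DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH and DOCKER_API_VERSION;
	// WithAPIVersionNegotiation keeps the client usable against older Docker Engines.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("connected, server version:", info.ServerVersion)
}
```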
## v5.0.2
### Enhancements
- CoreDNS Configmap is now edited in the auto-deploy manifest on disk instead of relying on `kubectl patch` command (#814)
- refactor: add cmd subcommands in a single function call (#819, @moeryomenko)
- handle ready-log-messages by type and intent & check them in single log streams instead of checking whole chunks every time (#818)
### Fixes
- fix: config file check failing with env var expansion because unexpanded input file was checked
### Misc
- cleanup: ensure that connections/streams are closed once unused (#818)
- cleanup: split type definitions across multiple files to increase readability (#818)
- docs: clarify `node create` help text about cluster reference (#808, @losinggeneration)
- refactor: move from io/ioutil (deprecated) to io and os packages (#827, @Juneezee)
## v5.0.1
### Enhancement
- add `HostFromClusterNetwork` field to `LocalRegistryHosting` configmap as per KEP-1755 (#754)
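For reference, KEP-1755 describes the `LocalRegistryHosting` document roughly as the struct below (field names taken from the KEP; the exact types used inside k3d may differ):

```go
package types

// LocalRegistryHostingV1 mirrors the document described in KEP-1755; it is
// serialized into the "local-registry-hosting" ConfigMap in the kube-public namespace.
type LocalRegistryHostingV1 struct {
	// Host is the registry address as reachable from the host machine, e.g. "localhost:5000".
	Host string `yaml:"host,omitempty"`
	// HostFromClusterNetwork is the address as reachable from within the cluster network.
	HostFromClusterNetwork string `yaml:"hostFromClusterNetwork,omitempty"`
	// HostFromContainerRuntime is the address as reachable from the container runtime.
	HostFromContainerRuntime string `yaml:"hostFromContainerRuntime,omitempty"`
	// Help points to documentation for this registry setup.
	Help string `yaml:"help,omitempty"`
}
```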
### Fixes
- fix: nilpointer exception on failed exec process with no returned logreader
- make post-create cluster preparation (DNS stuff mostly) more resilient (#780)
- fix v1alpha2 -> v1alpha3 config migration (and other related issues) (#799)
### Misc
- docs: fix typo (#784)
- docs: fix usage of legacy `--k3s-agent/server-arg` flag
## v5.0.0
This release contains a whole lot of new features, breaking changes as well as smaller fixes and improvements.
The changelog shown here is likely not complete but gives a broad overview over the changes.
For more details, please check the v5 milestone (<https://github.com/rancher/k3d/milestone/27>) or even the commit history.
The docs have been updated, so you should also find the information you need there, with more to come!
The demo repository has also been updated to work with k3d v5: <https://github.com/iwilltry42/k3d-demo>.
**Info**: <https://k3d.io> is now versioned, so you can check out different versions of the documentation by using the dropdown menu in the page title bar!
**Feedback welcome!**
### Breaking Changes
- new syntax for nodefilters
@ -69,7 +15,6 @@ The demo repository has also been updated to work with k3d v5: <https://github.c
- the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default
- to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag
- the nodefilter `loadbalancer` will now do the same as `servers:*;agents:*` (proxied via the loadbalancer)
- flag `--registry-create` transformed from bool flag to string flag: lets you define the name and port-binding of the newly created registry, e.g. `--registry-create myregistry.localhost:5001`
### Fixes
@ -86,7 +31,6 @@ The demo repository has also been updated to work with k3d v5: <https://github.c
- updated fork of `confd` to make usage of the file backend including a file watcher for auto-reloads
- this also checks the config before applying it, so the lb doesn't crash on a faulty config
- updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards
- some settings of the loadbalancer can now be configured using `--lb-config-override`, see docs at <https://k3d.io/v5.0.0/design/defaults/#k3d-loadbalancer>
- helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638)
- concurrently add new nodes to an existing cluster (remove some dumb code) (#640)
- `--wait` is now the default for `k3d node create`
@ -108,7 +52,6 @@ The demo repository has also been updated to work with k3d v5: <https://github.c
- new config path `options.k3s.extraArgs`
- config file: environment variables (`$VAR`, `${VAR}` will be expanded unconditionally) (#643)
- docker context support (#601, @developer-guy & #674)
- Feature flag using the environment variable `K3D_FIX_DNS` and setting it to a true value (e.g. `export K3D_FIX_DNS=1`) to forward DNS queries to your local machine, e.g. to use your local company DNS
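The unconditional environment-variable expansion in config files mentioned above boils down to passing the raw file contents through Go's `os.ExpandEnv` before parsing (the `initConfig` hunk further down in this diff does exactly that). A minimal, self-contained sketch, with config keys used only for illustration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("K3S_IMAGE", "rancher/k3s:v1.21.2-k3s1") // example value, set here only for illustration

	raw := "apiVersion: k3d.io/v1alpha3\nkind: Simple\nimage: ${K3S_IMAGE}\n"

	// Both $VAR and ${VAR} are replaced; unset variables expand to empty strings.
	fmt.Print(os.ExpandEnv(raw))
}
```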
### Misc
@ -116,28 +59,6 @@ The demo repository has also been updated to work with k3d v5: <https://github.c
- logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640)
- tests/e2e: add tests for v1alpha2 to v1alpha3 migration
- docs: use v1alpha3 config version
- docs: update general appearance and cleanup
## v4.4.8
### Enhancements
- Improved DroneCI Pipeline for Multiarch Images and SemVer Tags (#712)
- **Important**: New images will not have the `v` prefix in the tag anymore!
- but now real releases will use the "hierarchical" SemVer tags, so you could e.g. subscribe to rancher/k3d-proxy:4 to get v4.x.x images for the proxy container
### Fixes
- clusterCreate: do not override hostIP if hostPort is missing (#693, @lukaszo)
- imageImport: import all listed images, not only the first one (#701, @mszostok)
- clusterCreate: when memory constraints are set, only pull the image used for checking the edac folder, if it's not present on the machine
- fix: update k3d-tools dependencies and use API Version Negotiation, so it still works with older versions of the Docker Engine (#679)
### Misc
- install script: add darwin/arm64 support (#676, @colelawrence)
- docs: fix go install command (#677, @Rots)
- docs: add project overview (<https://k3d.io/internals/project/>) (#680)
## v4.4.7

@ -3,7 +3,7 @@
# -> golang image used solely for building the k3d binary #
# -> built executable can then be copied into other stages #
############################################################
FROM golang:1.17 as builder
FROM golang:1.16 as builder
ARG GIT_TAG_OVERRIDE
WORKDIR /app
COPY . .

@ -26,8 +26,8 @@ ifeq ($(GIT_TAG),)
GIT_TAG := $(shell git describe --always)
endif
# Docker image tag derived from Git tag (with prefix "v" stripped off)
K3D_IMAGE_TAG := $(GIT_TAG:v%=%)
# Docker image tag derived from Git tag
K3D_IMAGE_TAG := $(GIT_TAG)
# get latest k3s version: grep the tag and replace + with - (difference between git and dockerhub tags)
K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable" | egrep -o '/v[^ ]+"' | sed -E 's/\/|\"//g' | sed -E 's/\+/\-/')
@ -65,7 +65,7 @@ PKG := $(shell go mod vendor)
TAGS :=
TESTS := ./...
TESTFLAGS :=
LDFLAGS := -w -s -X github.com/rancher/k3d/v5/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v5/version.K3sVersion=${K3S_TAG}
LDFLAGS := -w -s -X github.com/rancher/k3d/v4/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v4/version.K3sVersion=${K3S_TAG}
GCFLAGS :=
GOFLAGS :=
BINDIR := $(CURDIR)/bin
@ -74,7 +74,7 @@ BINARIES := k3d
# Set version of the k3d helper images for build
ifneq ($(K3D_HELPER_VERSION),)
$(info [INFO] Helper Image version set to ${K3D_HELPER_VERSION})
LDFLAGS += -X github.com/rancher/k3d/v5/version.HelperVersionOverride=${K3D_HELPER_VERSION}
LDFLAGS += -X github.com/rancher/k3d/v4/version.HelperVersionOverride=${K3D_HELPER_VERSION}
endif
# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
@ -129,10 +129,10 @@ build-docker-%:
# build helper images
build-helper-images:
@echo "Building docker image rancher/k3d-proxy:$(K3D_IMAGE_TAG)"
DOCKER_BUILDKIT=1 docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(K3D_IMAGE_TAG)
@echo "Building docker image rancher/k3d-tools:$(K3D_IMAGE_TAG)"
DOCKER_BUILDKIT=1 docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(K3D_IMAGE_TAG) --build-arg GIT_TAG=$(GIT_TAG)
@echo "Building docker image rancher/k3d-proxy:$(GIT_TAG)"
DOCKER_BUILDKIT=1 docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(GIT_TAG)
@echo "Building docker image rancher/k3d-tools:$(GIT_TAG)"
DOCKER_BUILDKIT=1 docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(GIT_TAG) --build-arg GIT_TAG=$(GIT_TAG)
##############################
########## Cleaning ##########

@ -4,16 +4,16 @@
[![License](https://img.shields.io/github/license/rancher/k3d?style=flat-square)](./LICENSE.md)
![Downloads](https://img.shields.io/github/downloads/rancher/k3d/total.svg?style=flat-square)
[![Go Module](https://img.shields.io/badge/Go%20Module-github.com%2Francher%2Fk3d%2Fv5-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/rancher/k3d/v5)
[![Go Module](https://img.shields.io/badge/Go%20Module-github.com%2Francher%2Fk3d%2Fv4-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/rancher/k3d/v4)
[![Go version](https://img.shields.io/github/go-mod/go-version/rancher/k3d?logo=go&logoColor=white&style=flat-square)](./go.mod)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3d?style=flat-square)](https://goreportcard.com/report/github.com/rancher/k3d)
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
[![All Contributors](https://img.shields.io/badge/all_contributors-14-orange.svg?style=flat-square)](#contributors-)
[![All Contributors](https://img.shields.io/badge/all_contributors-12-orange.svg?style=flat-square)](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg)](code_of_conduct.md)
**Please Note:** `main` is now v5.0.0 and the code for v4.x can be found in the `main-v4` branch!
**Please Note:** `main` is now v4.0.0 and the code for v3.x can be found in the `main-v3` branch!
## [k3s in docker](https://k3d.io)
@ -21,7 +21,7 @@ k3s is the lightweight Kubernetes distribution by Rancher: [rancher/k3s](https:/
k3d creates containerized k3s clusters. This means that you can spin up a multi-node k3s cluster on a single machine using docker.
[![asciicast](https://asciinema.org/a/436420.svg)](https://asciinema.org/a/436420)
[![asciicast](https://asciinema.org/a/347570.svg)](https://asciinema.org/a/347570)
## Learning
@ -35,9 +35,8 @@ k3d creates containerized k3s clusters. This means, that you can spin up a multi
## Releases
**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!
**Note**: In January 2021 we upgraded from v3.x.x to **v4.0.0** which includes some breaking changes!
**Note**: In September 2021 we upgraded from v4.4.8 to **v5.0.0** which includes some breaking changes!
**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!
**Note**: In January 2021 we upgraded from v3.x.x to **v4.0.0** which includes some breaking changes!
| Platform | Stage | Version | Release Date | |
|-----------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|---|
@ -54,8 +53,8 @@ You have several options there:
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- use the install script to grab a specific release (via `TAG` environment variable):
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- use [Homebrew](https://brew.sh): `brew install k3d` (Homebrew is available for MacOS and Linux)
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
@ -70,7 +69,7 @@ or...
## Build
1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v5@main`
1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v4@main`
2. Inside the repo run
- 'make install-tools' to make sure required go packages are installed
3. Inside the repo run one of the following commands
@ -140,8 +139,6 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
<td align="center"><a href="http://wsl.dev"><img src="https://avatars2.githubusercontent.com/u/905874?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Nuno do Carmo</b></sub></a><br /><a href="#content-nunix" title="Content">🖋</a> <a href="#tutorial-nunix" title="Tutorials"></a> <a href="#question-nunix" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/erwinkersten"><img src="https://avatars0.githubusercontent.com/u/4391121?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Erwin Kersten</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=erwinkersten" title="Documentation">📖</a></td>
<td align="center"><a href="http://www.alexsears.com"><img src="https://avatars.githubusercontent.com/u/3712883?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Alex Sears</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=searsaw" title="Documentation">📖</a></td>
<td align="center"><a href="http://shanduur.github.io"><img src="https://avatars.githubusercontent.com/u/32583062?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Mateusz Urbanek</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=Shanduur" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/benjaminjb"><img src="https://avatars.githubusercontent.com/u/4651855?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Benjamin Blattberg</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=benjaminjb" title="Code">💻</a></td>
</tr>
</table>

@ -22,8 +22,7 @@ THE SOFTWARE.
package cluster
import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -37,19 +36,19 @@ func NewCmdCluster() *cobra.Command {
Long: `Manage cluster(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
// add subcommands
cmd.AddCommand(NewCmdClusterCreate(),
NewCmdClusterStart(),
NewCmdClusterStop(),
NewCmdClusterDelete(),
NewCmdClusterList(),
NewCmdClusterEdit())
cmd.AddCommand(NewCmdClusterCreate())
cmd.AddCommand(NewCmdClusterStart())
cmd.AddCommand(NewCmdClusterStop())
cmd.AddCommand(NewCmdClusterDelete())
cmd.AddCommand(NewCmdClusterList())
cmd.AddCommand(NewCmdClusterEdit())
// add flags

@ -24,28 +24,28 @@ package cluster
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/docker/go-connections/nat"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
cliutil "github.com/rancher/k3d/v5/cmd/util"
cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
k3dCluster "github.com/rancher/k3d/v5/pkg/client"
"github.com/rancher/k3d/v5/pkg/config"
conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/version"
cliutil "github.com/rancher/k3d/v4/cmd/util"
k3dCluster "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/config"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)
var configFile string
@ -58,30 +58,74 @@ Every cluster will consist of one or more containers:
- (optionally) 1 (or more) agent node containers (k3s)
`
/*
* Viper for configuration handling
* we use two different instances of Viper here to handle
* - cfgViper: "static" configuration
* - ppViper: "pre-processed" configuration, where CLI input has to be pre-processed
* to be treated as part of the SimpleConfig
*/
var (
cfgViper = viper.New()
ppViper = viper.New()
)
var cfgViper = viper.New()
var ppViper = viper.New()
func initConfig() error {
func initConfig() {
// Viper for pre-processed config options
ppViper.SetEnvPrefix("K3D")
if l.Log().GetLevel() >= logrus.DebugLevel {
// viper for the general config (file, env and non pre-processed flags)
cfgViper.SetEnvPrefix("K3D")
cfgViper.AutomaticEnv()
c, _ := yaml.Marshal(ppViper.AllSettings())
l.Log().Debugf("Additional CLI Configuration:\n%s", c)
cfgViper.SetConfigType("yaml")
// Set config file, if specified
if configFile != "" {
if _, err := os.Stat(configFile); err != nil {
log.Fatalf("Failed to stat config file %s: %+v", configFile, err)
}
// create temporary file to expand environment variables in the config without writing that back to the original file
// we're doing it here, because this happens just before absolutely all other processing
tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile)))
if err != nil {
log.Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
}
defer tmpfile.Close()
originalcontent, err := ioutil.ReadFile(configFile)
if err != nil {
log.Fatalf("error reading config file %s: %v", configFile, err)
}
expandedcontent := os.ExpandEnv(string(originalcontent))
if _, err := tmpfile.WriteString(expandedcontent); err != nil {
log.Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
}
// use temp file with expanded variables
cfgViper.SetConfigFile(tmpfile.Name())
// try to read config into memory (viper map structure)
if err := cfgViper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
log.Fatalf("Config file %s not found: %+v", configFile, err)
}
// config file found but some other error happened
log.Fatalf("Failed to read config file %s: %+v", configFile, err)
}
schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
if err != nil {
log.Fatalf("Cannot validate config file %s: %+v", configFile, err)
}
if err := config.ValidateSchemaFile(configFile, schema); err != nil {
log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
}
log.Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
}
if log.GetLevel() >= log.DebugLevel {
c, _ := yaml.Marshal(cfgViper.AllSettings())
log.Debugf("Configuration:\n%s", c)
return cliconfig.InitViperWithConfigFile(cfgViper, configFile)
c, _ = yaml.Marshal(ppViper.AllSettings())
log.Debugf("Additional CLI Configuration:\n%s", c)
}
}
// NewCmdClusterCreate returns a new cobra command
@ -94,7 +138,8 @@ func NewCmdClusterCreate() *cobra.Command {
Long: clusterCreateDescription,
Args: cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
PreRunE: func(cmd *cobra.Command, args []string) error {
return initConfig()
initConfig()
return nil
},
Run: func(cmd *cobra.Command, args []string) {
@ -109,27 +154,27 @@ func NewCmdClusterCreate() *cobra.Command {
}
cfg, err := config.FromViper(cfgViper)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
l.Log().Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
log.Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
simpleCfg := cfg.(conf.SimpleConfig)
l.Log().Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)
log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)
simpleCfg, err = applyCLIOverrides(simpleCfg)
if err != nil {
l.Log().Fatalf("Failed to apply CLI overrides: %+v", err)
log.Fatalf("Failed to apply CLI overrides: %+v", err)
}
l.Log().Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)
log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)
/**************************************
* Transform, Process & Validate Configuration *
@ -142,18 +187,18 @@ func NewCmdClusterCreate() *cobra.Command {
clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, simpleCfg)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
l.Log().Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
log.Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
clusterConfig, err = config.ProcessClusterConfig(*clusterConfig)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
l.Log().Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
log.Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
l.Log().Fatalln("Failed Cluster Configuration Validation: ", err)
log.Fatalln("Failed Cluster Configuration Validation: ", err)
}
/**************************************
@ -162,44 +207,44 @@ func NewCmdClusterCreate() *cobra.Command {
// check if a cluster with that name exists already
if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster); err == nil {
l.Log().Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
}
// create cluster
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
l.Log().Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
log.Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
clusterConfig.ClusterCreateOpts.WaitForServer = true
}
//if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil {
// rollback if creation failed
l.Log().Errorln(err)
log.Errorln(err)
if simpleCfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
l.Log().Fatalln("Cluster creation FAILED, rollback deactivated.")
log.Fatalln("Cluster creation FAILED, rollback deactivated.")
}
// rollback if creation failed
l.Log().Errorln("Failed to create cluster >>> Rolling Back")
log.Errorln("Failed to create cluster >>> Rolling Back")
if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, k3d.ClusterDeleteOpts{SkipRegistryCheck: true}); err != nil {
l.Log().Errorln(err)
l.Log().Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
log.Errorln(err)
log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
}
l.Log().Fatalln("Cluster creation FAILED, all changes have been rolled back!")
log.Fatalln("Cluster creation FAILED, all changes have been rolled back!")
}
l.Log().Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
log.Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
/**************
* Kubeconfig *
**************/
if !clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
l.Log().Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
log.Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
clusterConfig.KubeconfigOpts.SwitchCurrentContext = false
}
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
l.Log().Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
log.Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: simpleCfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
l.Log().Warningln(err)
log.Warningln(err)
}
}
@ -208,7 +253,7 @@ func NewCmdClusterCreate() *cobra.Command {
*****************/
// print information on how to use the cluster with kubectl
l.Log().Infoln("You can now use it like this:")
log.Infoln("You can now use it like this:")
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, clusterConfig.Cluster.Name))
} else if !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
@ -228,7 +273,7 @@ func NewCmdClusterCreate() *cobra.Command {
cmd.Flags().StringVarP(&configFile, "config", "c", "", "Path of a config file to use")
if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
log.Fatalln("Failed to mark flag 'config' as filename flag")
}
/***********************
@ -265,12 +310,9 @@ func NewCmdClusterCreate() *cobra.Command {
cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent:0,1\" --runtime-label \"other.label=somevalue@server:0\"`")
_ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label"))
cmd.Flags().String("registry-create", "", "Create a k3d-managed registry and connect it to the cluster (Format: `NAME[:HOST][:HOSTPORT]`\n - Example: `k3d cluster create --registry-create mycluster-registry:0.0.0.0:5432`")
_ = ppViper.BindPFlag("cli.registries.create", cmd.Flags().Lookup("registry-create"))
/* k3s */
cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server:0\"")
_ = ppViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg"))
_ = cfgViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg"))
/******************
* "Normal" Flags *
@ -320,6 +362,9 @@ func NewCmdClusterCreate() *cobra.Command {
cmd.Flags().Bool("no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
_ = cfgViper.BindPFlag("options.k3d.disablerollback", cmd.Flags().Lookup("no-rollback"))
cmd.Flags().Bool("no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
_ = cfgViper.BindPFlag("options.k3d.disablehostipinjection", cmd.Flags().Lookup("no-hostip"))
cmd.Flags().String("gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
_ = cfgViper.BindPFlag("options.runtime.gpurequest", cmd.Flags().Lookup("gpus"))
@ -337,16 +382,15 @@ func NewCmdClusterCreate() *cobra.Command {
cmd.Flags().StringArray("registry-use", nil, "Connect to one or more k3d-managed registries running locally")
_ = cfgViper.BindPFlag("registries.use", cmd.Flags().Lookup("registry-use"))
cmd.Flags().Bool("registry-create", false, "Create a k3d-managed registry and connect it to the cluster")
_ = cfgViper.BindPFlag("registries.create", cmd.Flags().Lookup("registry-create"))
cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file")
_ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config"))
if err := cmd.MarkFlagFilename("registry-config", "yaml", "yml"); err != nil {
l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
log.Fatalln("Failed to mark flag 'config' as filename flag")
}
/* Loadbalancer / Proxy */
cmd.Flags().StringSlice("lb-config-override", nil, "Use dotted YAML path syntax to override nginx loadbalancer settings")
_ = cfgViper.BindPFlag("options.k3d.loadbalancer.configoverrides", cmd.Flags().Lookup("lb-config-override"))
/* Subcommands */
// done
@ -380,25 +424,20 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// Overwrite if cli arg is set
if ppViper.IsSet("cli.api-port") {
if cfg.ExposeAPI.HostPort != "" {
l.Log().Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
log.Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
}
exposeAPI, err = cliutil.ParsePortExposureSpec(ppViper.GetString("cli.api-port"), k3d.DefaultAPIPort)
if err != nil {
return cfg, fmt.Errorf("failed to parse API Port spec: %w", err)
return cfg, err
}
}
// Set to random port if port is empty string
if len(exposeAPI.Binding.HostPort) == 0 {
var freePort string
port, err := cliutil.GetFreePort()
freePort = strconv.Itoa(port)
if err != nil || port == 0 {
l.Log().Warnf("Failed to get random free port: %+v", err)
l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", k3d.DefaultAPIPort)
freePort = k3d.DefaultAPIPort
exposeAPI, err = cliutil.ParsePortExposureSpec("random", k3d.DefaultAPIPort)
if err != nil {
return cfg, err
}
exposeAPI.Binding.HostPort = freePort
}
cfg.ExposeAPI = conf.SimpleExposureOpts{
@ -415,11 +454,11 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// split node filter from the specified volume
volume, filters, err := cliutil.SplitFiltersFromFlag(volumeFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create != nil || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
l.Log().Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
log.Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
}
// create new entry or append filter to existing entry
@ -437,7 +476,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
})
}
l.Log().Tracef("VolumeFilterMap: %+v", volumeFilterMap)
log.Tracef("VolumeFilterMap: %+v", volumeFilterMap)
// -> PORTS
portFilterMap := make(map[string][]string, 1)
@ -445,12 +484,12 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// split node filter from the specified volume
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
// create new entry or append filter to existing entry
if _, exists := portFilterMap[portmap]; exists {
l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
log.Fatalln("Same Portmapping can not be used for multiple nodes")
} else {
portFilterMap[portmap] = filters
}
@ -463,7 +502,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
})
}
l.Log().Tracef("PortFilterMap: %+v", portFilterMap)
log.Tracef("PortFilterMap: %+v", portFilterMap)
// --k3s-node-label
// k3sNodeLabelFilterMap will add k3s node label to applied node filters
@ -473,7 +512,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// split node filter from the specified label
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
// create new entry or append filter to existing entry
@ -491,7 +530,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
})
}
l.Log().Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)
log.Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)
// --runtime-label
// runtimeLabelFilterMap will add container runtime label to applied node filters
@ -501,7 +540,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// split node filter from the specified label
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
cliutil.ValidateRuntimeLabelKey(strings.Split(label, "=")[0])
@ -521,7 +560,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
})
}
l.Log().Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
log.Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
// --env
// envFilterMap will add container env vars to applied node filters
@ -531,7 +570,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// split node filter from the specified env var
env, filters, err := cliutil.SplitFiltersFromFlag(envFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
// create new entry or append filter to existing entry
@ -549,7 +588,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
})
}
l.Log().Tracef("EnvFilterMap: %+v", envFilterMap)
log.Tracef("EnvFilterMap: %+v", envFilterMap)
// --k3s-arg
argFilterMap := make(map[string][]string, 1)
@ -558,7 +597,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// split node filter from the specified arg
arg, filters, err := cliutil.SplitFiltersFromFlag(argFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
// create new entry or append filter to existing entry
@ -576,24 +615,5 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
})
}
// --registry-create
if ppViper.IsSet("cli.registries.create") {
flagvalue := ppViper.GetString("cli.registries.create")
fvSplit := strings.SplitN(flagvalue, ":", 2)
if cfg.Registries.Create == nil {
cfg.Registries.Create = &conf.SimpleConfigRegistryCreateConfig{}
}
cfg.Registries.Create.Name = fvSplit[0]
if len(fvSplit) > 1 {
exposeAPI, err = cliutil.ParsePortExposureSpec(fvSplit[1], "1234") // internal port is unused after all
if err != nil {
return cfg, fmt.Errorf("failed to parse registry port spec: %w", err)
}
cfg.Registries.Create.Host = exposeAPI.Host
cfg.Registries.Create.HostPort = exposeAPI.Binding.HostPort
}
}
return cfg, nil
}

@ -26,21 +26,16 @@ import (
"os"
"path"
"github.com/rancher/k3d/v5/cmd/util"
cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3dutil "github.com/rancher/k3d/v5/pkg/util"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
k3dutil "github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var clusterDeleteConfigFile string
var clusterDeleteCfgViper = viper.New()
// NewCmdClusterDelete returns a new cobra command
func NewCmdClusterDelete() *cobra.Command {
@ -52,38 +47,35 @@ func NewCmdClusterDelete() *cobra.Command {
Long: `Delete cluster(s).`,
Args: cobra.MinimumNArgs(0), // 0 or n arguments; 0 = default cluster name
ValidArgsFunction: util.ValidArgsAvailableClusters,
PreRunE: func(cmd *cobra.Command, args []string) error {
return cliconfig.InitViperWithConfigFile(clusterDeleteCfgViper, clusterDeleteConfigFile)
},
Run: func(cmd *cobra.Command, args []string) {
clusters := parseDeleteClusterCmd(cmd, args)
if len(clusters) == 0 {
l.Log().Infoln("No clusters found")
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := client.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c, k3d.ClusterDeleteOpts{SkipRegistryCheck: false}); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
l.Log().Infoln("Removing cluster details from default kubeconfig...")
log.Infoln("Removing cluster details from default kubeconfig...")
if err := client.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
l.Log().Warnln("Failed to remove cluster details from default kubeconfig")
l.Log().Warnln(err)
log.Warnln("Failed to remove cluster details from default kubeconfig")
log.Warnln(err)
}
l.Log().Infoln("Removing standalone kubeconfig file (if there is one)...")
log.Infoln("Removing standalone kubeconfig file (if there is one)...")
configDir, err := k3dutil.GetConfigDirOrCreate()
if err != nil {
l.Log().Warnf("Failed to delete kubeconfig file: %+v", err)
log.Warnf("Failed to delete kubeconfig file: %+v", err)
} else {
kubeconfigfile := path.Join(configDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
if err := os.Remove(kubeconfigfile); err != nil {
if !os.IsNotExist(err) {
l.Log().Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
log.Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
}
}
}
l.Log().Infof("Successfully deleted cluster %s!", c.Name)
log.Infof("Successfully deleted cluster %s!", c.Name)
}
}
@ -95,15 +87,6 @@ func NewCmdClusterDelete() *cobra.Command {
// add flags
cmd.Flags().BoolP("all", "a", false, "Delete all existing clusters")
/***************
* Config File *
***************/
cmd.Flags().StringVarP(&clusterDeleteConfigFile, "config", "c", "", "Path of a config file to use")
if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
}
// done
return cmd
}
@ -111,45 +94,20 @@ func NewCmdClusterDelete() *cobra.Command {
// parseDeleteClusterCmd parses the command input into variables required to delete clusters
func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
// --all
var clusters []*k3d.Cluster
// --all
all, err := cmd.Flags().GetBool("all")
if err != nil {
l.Log().Fatalln(err)
}
// --config
if clusterDeleteConfigFile != "" {
// not allowed with --all or more args
if len(args) > 0 || all {
l.Log().Fatalln("failed to delete cluster: cannot use `--config` flag with additional arguments or `--all`")
}
if clusterDeleteCfgViper.GetString("name") == "" {
l.Log().Fatalln("failed to delete cluster via config file: no name in config file")
}
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterDeleteCfgViper.GetString("name")})
if err != nil {
l.Log().Fatalf("failed to delete cluster '%s': %v", clusterDeleteCfgViper.GetString("name"), err)
}
clusters = append(clusters, c)
return clusters
}
// --all was set
if all {
l.Log().Infoln("Deleting all clusters...")
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
log.Infoln("Deleting all clusters...")
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
return clusters
}
// args only
clusternames := []string{k3d.DefaultClusterName}
if len(args) != 0 {
clusternames = args
@ -161,7 +119,7 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
if err == client.ClusterGetNoNodesFoundError {
continue
}
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, c)
}

@ -22,13 +22,13 @@ THE SOFTWARE.
package cluster
import (
"github.com/rancher/k3d/v5/cmd/util"
cliutil "github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
cliutil "github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -47,13 +47,13 @@ func NewCmdClusterEdit() *cobra.Command {
existingCluster, changeset := parseEditClusterCmd(cmd, args)
l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)
log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)
if err := client.ClusterEditChangesetSimple(cmd.Context(), runtimes.SelectedRuntime, existingCluster, changeset); err != nil {
l.Log().Fatalf("Failed to update the cluster: %v", err)
log.Fatalf("Failed to update the cluster: %v", err)
}
l.Log().Infof("Successfully updated %s", existingCluster.Name)
log.Infof("Successfully updated %s", existingCluster.Name)
},
}
@ -72,11 +72,11 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
existingCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: args[0]})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
if existingCluster == nil {
l.Log().Infof("Cluster %s not found", args[0])
log.Infof("Cluster %s not found", args[0])
return nil, nil
}
@ -87,7 +87,7 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
*/
portFlags, err := cmd.Flags().GetStringArray("port-add")
if err != nil {
l.Log().Errorln(err)
log.Errorln(err)
return nil, nil
}
@ -100,12 +100,12 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
// split node filter from the specified volume
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
// create new entry or append filter to existing entry
if _, exists := portFilterMap[portmap]; exists {
l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
log.Fatalln("Same Portmapping can not be used for multiple nodes")
} else {
portFilterMap[portmap] = filters
}
@ -118,7 +118,7 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
})
}
l.Log().Tracef("PortFilterMap: %+v", portFilterMap)
log.Tracef("PortFilterMap: %+v", portFilterMap)
return existingCluster, &changeset
}

@ -28,14 +28,15 @@ import (
"os"
"strings"
"github.com/rancher/k3d/v5/cmd/util"
k3cluster "github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
k3cluster "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
log "github.com/sirupsen/logrus"
"github.com/liggitt/tabwriter"
)
@ -82,14 +83,14 @@ func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {
// cluster name not specified : get all clusters
clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
} else {
for _, clusterName := range args {
// cluster name specified : get specific cluster
retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
@ -125,7 +126,7 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
}
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
if err != nil {
l.Log().Fatalln("Failed to print headers")
log.Fatalln("Failed to print headers")
}
}
}

@ -24,22 +24,21 @@ package cluster
import (
"time"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
l "github.com/rancher/k3d/v5/pkg/logger"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdClusterStart returns a new cobra command
func NewCmdClusterStart() *cobra.Command {
startClusterOpts := types.ClusterStartOpts{
Intent: k3d.IntentClusterStart,
}
startClusterOpts := types.ClusterStartOpts{}
// create new command
cmd := &cobra.Command{
@ -50,18 +49,12 @@ func NewCmdClusterStart() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
clusters := parseStartClusterCmd(cmd, args)
if len(clusters) == 0 {
l.Log().Infoln("No clusters found")
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
envInfo, err := client.GatherEnvironmentInfo(cmd.Context(), runtimes.SelectedRuntime, c)
if err != nil {
l.Log().Fatalf("failed to gather info about cluster environment: %v", err)
}
startClusterOpts.EnvironmentInfo = envInfo
if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
l.Log().Infof("Started cluster '%s'", c.Name)
}
}
},
@ -84,11 +77,11 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
var clusters []*k3d.Cluster
if all, err := cmd.Flags().GetBool("all"); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
} else if all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
return clusters
}
@ -101,7 +94,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
for _, name := range clusternames {
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, cluster)
}

@ -24,11 +24,12 @@ package cluster
import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdClusterStop returns a new cobra command
@ -43,11 +44,11 @@ func NewCmdClusterStop() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
clusters := parseStopClusterCmd(cmd, args)
if len(clusters) == 0 {
l.Log().Infoln("No clusters found")
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := client.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
}
@ -69,11 +70,11 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
var clusters []*k3d.Cluster
if all, err := cmd.Flags().GetBool("all"); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
} else if all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
return clusters
}
@ -86,7 +87,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
for _, name := range clusternames {
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, cluster)
}

@ -22,8 +22,7 @@ THE SOFTWARE.
package config
import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -35,13 +34,14 @@ func NewCmdConfig() *cobra.Command {
Long: `Work with config file(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
cmd.AddCommand(NewCmdConfigInit(), NewCmdConfigMigrate())
cmd.AddCommand(NewCmdConfigInit())
cmd.AddCommand(NewCmdConfigMigrate())
return cmd
}

@ -25,8 +25,8 @@ import (
"fmt"
"os"
config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -39,7 +39,7 @@ func NewCmdConfigInit() *cobra.Command {
Use: "init",
Aliases: []string{"create"},
Run: func(cmd *cobra.Command, args []string) {
l.Log().Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
log.Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
if output == "-" {
fmt.Println(config.DefaultConfig)
} else {
@ -51,16 +51,16 @@ func NewCmdConfigInit() *cobra.Command {
// create/overwrite file
file, err = os.Create(output)
if err != nil {
l.Log().Fatalf("Failed to create/overwrite output file: %s", err)
log.Fatalf("Failed to create/overwrite output file: %s", err)
}
// write content
if _, err = file.WriteString(config.DefaultConfig); err != nil {
l.Log().Fatalf("Failed to write to output file: %+v", err)
log.Fatalf("Failed to write to output file: %+v", err)
}
} else if err != nil {
l.Log().Fatalf("Failed to stat output file: %+v", err)
log.Fatalf("Failed to stat output file: %+v", err)
} else {
l.Log().Errorln("Output file exists and --force was not set")
log.Errorln("Output file exists and --force was not set")
os.Exit(1)
}
}
@ -69,7 +69,7 @@ func NewCmdConfigInit() *cobra.Command {
cmd.Flags().StringVarP(&output, "output", "o", "k3d-default.yaml", "Write a default k3d config")
if err := cmd.MarkFlagFilename("output", "yaml", "yml"); err != nil {
l.Log().Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
log.Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
}
cmd.Flags().BoolVarP(&force, "force", "f", false, "Force overwrite of target file")

@ -25,8 +25,8 @@ import (
"os"
"strings"
"github.com/rancher/k3d/v5/pkg/config"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v4/pkg/config"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
@ -44,7 +44,7 @@ func NewCmdConfigMigrate() *cobra.Command {
configFile := args[0]
if _, err := os.Stat(configFile); err != nil {
l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
log.Fatalf("Failed to stat config file %s: %+v", configFile, err)
}
cfgViper := viper.New()
@ -55,38 +55,38 @@ func NewCmdConfigMigrate() *cobra.Command {
// try to read config into memory (viper map structure)
if err := cfgViper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
log.Fatalf("Config file %s not found: %+v", configFile, err)
}
// config file found but some other error happened
l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
log.Fatalf("Failed to read config file %s: %+v", configFile, err)
}
schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
if err != nil {
l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
log.Fatalf("Cannot validate config file %s: %+v", configFile, err)
}
if err := config.ValidateSchemaFile(configFile, schema); err != nil {
l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
}
l.Log().Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
cfg, err := config.FromViper(cfgViper)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
yamlout, err := yaml.Marshal(cfg)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
output := "-"
@ -97,11 +97,11 @@ func NewCmdConfigMigrate() *cobra.Command {
if output == "-" {
if _, err := os.Stdout.Write(yamlout); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
} else {
if err := os.WriteFile(output, yamlout, os.ModePerm); err != nil {
l.Log().Fatalln(err)
if err := os.WriteFile(output, yamlout, os.ModeAppend); err != nil {
log.Fatalln(err)
}
}
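A side note on the two os.WriteFile calls in this hunk: the third argument is the permission mode used when the output file is created. os.ModePerm is the plain 0o777 permission mask, whereas os.ModeAppend is a file-mode flag whose permission bits are all zero, so a freshly created file would end up without any permissions. A minimal standalone sketch (not part of this repository) that makes the difference visible:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.ModePerm is the full permission mask: prints 777
	fmt.Printf("os.ModePerm = %o\n", os.ModePerm)
	// os.ModeAppend is a mode *flag*; its permission bits are zero: prints 0
	fmt.Printf("os.ModeAppend.Perm() = %o\n", os.ModeAppend.Perm())
}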

View File

@ -24,11 +24,11 @@ package debug
import (
"fmt"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
)
@ -42,8 +42,8 @@ func NewCmdDebug() *cobra.Command {
Long: `Debug k3d cluster(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
@ -61,8 +61,8 @@ func NewCmdDebugLoadbalancer() *cobra.Command {
Long: `Debug the loadbalancer`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
@ -74,16 +74,16 @@ func NewCmdDebugLoadbalancer() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
lbconf, err := client.GetLoadbalancerConfig(cmd.Context(), runtimes.SelectedRuntime, c)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
yamlized, err := yaml.Marshal(lbconf)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
fmt.Println(string(yamlized))
},

View File

@ -22,7 +22,7 @@ THE SOFTWARE.
package image
import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -37,8 +37,8 @@ func NewCmdImage() *cobra.Command {
Long: `Handle container images.`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}

View File

@ -26,11 +26,12 @@ import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/tools"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdImageImport returns a new cobra command
@ -59,20 +60,20 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
images, clusters := parseLoadImageCmd(cmd, args)
l.Log().Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
log.Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
errOccured := false
for _, cluster := range clusters {
l.Log().Infof("Importing image(s) into cluster '%s'", cluster.Name)
if err := client.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
l.Log().Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
log.Infof("Importing image(s) into cluster '%s'", cluster.Name)
if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
log.Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
errOccured = true
}
}
if errOccured {
l.Log().Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
log.Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
os.Exit(1)
}
l.Log().Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
log.Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
},
}
@ -81,7 +82,7 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
*********/
cmd.Flags().StringArrayP("cluster", "c", []string{k3d.DefaultClusterName}, "Select clusters to load the image to.")
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}
cmd.Flags().BoolVarP(&loadImageOpts.KeepTar, "keep-tarball", "k", false, "Do not delete the tarball containing the saved images from the shared volume")
@ -99,7 +100,7 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
// --cluster
clusterNames, err := cmd.Flags().GetStringArray("cluster")
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters := []k3d.Cluster{}
for _, clusterName := range clusterNames {
@ -109,7 +110,7 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
// images
images := args
if len(images) == 0 {
l.Log().Fatalln("No images specified!")
log.Fatalln("No images specified!")
}
return images, clusters
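Based on the flags above, a hedged usage example (image and cluster names are invented): "k3d image import my-app:0.1.0 --cluster mycluster --cluster othercluster" imports the image into both clusters, and adding "-k" keeps the intermediate tarball in the shared volume instead of deleting it.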

View File

@ -22,7 +22,7 @@ THE SOFTWARE.
package kubeconfig
import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -36,14 +36,15 @@ func NewCmdKubeconfig() *cobra.Command {
Long: `Manage kubeconfig(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
// add subcommands
cmd.AddCommand(NewCmdKubeconfigGet(), NewCmdKubeconfigMerge())
cmd.AddCommand(NewCmdKubeconfigGet())
cmd.AddCommand(NewCmdKubeconfigMerge())
// add flags

View File

@ -25,12 +25,13 @@ import (
"fmt"
"os"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
type getKubeconfigFlags struct {
@ -69,13 +70,13 @@ func NewCmdKubeconfigGet() *cobra.Command {
if getKubeconfigFlags.all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
} else {
for _, clusterName := range args {
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
@ -84,10 +85,10 @@ func NewCmdKubeconfigGet() *cobra.Command {
// get kubeconfigs from all clusters
errorGettingKubeconfig := false
for _, c := range clusters {
l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
fmt.Println("---") // YAML document separator
if _, err := client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
l.Log().Errorln(err)
log.Errorln(err)
errorGettingKubeconfig = true
}
}

View File

@ -27,14 +27,15 @@ import (
"path"
"strings"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3dutil "github.com/rancher/k3d/v5/pkg/util"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
k3dutil "github.com/rancher/k3d/v4/pkg/util"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
log "github.com/sirupsen/logrus"
)
type mergeKubeconfigFlags struct {
@ -63,14 +64,14 @@ func NewCmdKubeconfigMerge() *cobra.Command {
var err error
if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
l.Log().Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
log.Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
}
// generate list of clusters
if mergeKubeconfigFlags.all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
} else {
@ -82,7 +83,7 @@ func NewCmdKubeconfigMerge() *cobra.Command {
for _, clusterName := range clusternames {
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
@ -93,18 +94,18 @@ func NewCmdKubeconfigMerge() *cobra.Command {
var outputs []string
outputDir, err := k3dutil.GetConfigDirOrCreate()
if err != nil {
l.Log().Errorln(err)
l.Log().Fatalln("Failed to save kubeconfig to local directory")
log.Errorln(err)
log.Fatalln("Failed to save kubeconfig to local directory")
}
for _, c := range clusters {
l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
output := mergeKubeconfigFlags.output
if output == "" && !mergeKubeconfigFlags.targetDefault {
output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
}
output, err = client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
if err != nil {
l.Log().Errorln(err)
log.Errorln(err)
errorGettingKubeconfig = true
} else {
outputs = append(outputs, output)
@ -126,7 +127,7 @@ func NewCmdKubeconfigMerge() *cobra.Command {
// add flags
cmd.Flags().StringVarP(&mergeKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
if err := cmd.MarkFlagFilename("output"); err != nil {
l.Log().Fatalln("Failed to mark flag --output as filename")
log.Fatalln("Failed to mark flag --output as filename")
}
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "kubeconfig-merge-default", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
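A hedged usage example for this command (the cluster name is invented): "k3d kubeconfig merge mycluster --kubeconfig-merge-default" merges the cluster's kubeconfig into the default kubeconfig, while "k3d kubeconfig merge mycluster -o /tmp/mycluster.yaml" writes it to a separate file; as the check at the top of the Run function shows, the two flags cannot be combined.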

View File

@ -22,7 +22,7 @@ THE SOFTWARE.
package node
import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -36,19 +36,19 @@ func NewCmdNode() *cobra.Command {
Long: `Manage node(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
// add subcommands
cmd.AddCommand(NewCmdNodeCreate(),
NewCmdNodeStart(),
NewCmdNodeStop(),
NewCmdNodeDelete(),
NewCmdNodeList(),
NewCmdNodeEdit())
cmd.AddCommand(NewCmdNodeCreate())
cmd.AddCommand(NewCmdNodeStart())
cmd.AddCommand(NewCmdNodeStop())
cmd.AddCommand(NewCmdNodeDelete())
cmd.AddCommand(NewCmdNodeList())
cmd.AddCommand(NewCmdNodeEdit())
// add flags

View File

@ -29,13 +29,13 @@ import (
"github.com/spf13/cobra"
dockerunits "github.com/docker/go-units"
"github.com/rancher/k3d/v5/cmd/util"
cliutil "github.com/rancher/k3d/v5/cmd/util"
k3dc "github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/version"
"github.com/rancher/k3d/v4/cmd/util"
cliutil "github.com/rancher/k3d/v4/cmd/util"
k3dc "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)
// NewCmdNodeCreate returns a new cobra command
@ -50,19 +50,12 @@ func NewCmdNodeCreate() *cobra.Command {
Long: `Create a new containerized k3s node (k3s in docker).`,
Args: cobra.ExactArgs(1), // exactly one name accepted // TODO: if not specified, inherit from cluster that the node shall belong to, if that is specified
Run: func(cmd *cobra.Command, args []string) {
nodes, clusterName := parseCreateNodeCmd(cmd, args)
if strings.HasPrefix(clusterName, "https://") {
l.Log().Infof("Adding %d node(s) to the remote cluster '%s'...", len(nodes), clusterName)
if err := k3dc.NodeAddToClusterMultiRemote(cmd.Context(), runtimes.SelectedRuntime, nodes, clusterName, createNodeOpts); err != nil {
l.Log().Fatalf("failed to add %d node(s) to the remote cluster '%s': %v", len(nodes), clusterName, err)
}
} else {
l.Log().Infof("Adding %d node(s) to the runtime local cluster '%s'...", len(nodes), clusterName)
if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, &k3d.Cluster{Name: clusterName}, createNodeOpts); err != nil {
l.Log().Fatalf("failed to add %d node(s) to the runtime local cluster '%s': %v", len(nodes), clusterName, err)
}
nodes, cluster := parseCreateNodeCmd(cmd, args)
if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
log.Fatalln(err)
}
l.Log().Infof("Successfully created %d node(s)!", len(nodes))
log.Infof("Successfully created %d node(s)!", len(nodes))
},
}
@ -70,11 +63,11 @@ func NewCmdNodeCreate() *cobra.Command {
cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
l.Log().Fatalln("Failed to register flag completion for '--role'", err)
log.Fatalln("Failed to register flag completion for '--role'", err)
}
cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Cluster URL or k3d cluster name to connect to.")
cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Select the cluster that the node shall connect to.")
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
@ -86,70 +79,69 @@ func NewCmdNodeCreate() *cobra.Command {
cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"")
cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"")
cmd.Flags().StringSliceP("network", "n", []string{}, "Add node to (another) runtime network")
cmd.Flags().StringVarP(&createNodeOpts.ClusterToken, "token", "t", "", "Override cluster token (required when connecting to an external cluster)")
// done
return cmd
}
// parseCreateNodeCmd parses the command input into variables required to create a node
func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, string) {
// parseCreateNodeCmd parses the command input into variables required to create a cluster
func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cluster) {
// --replicas
replicas, err := cmd.Flags().GetInt("replicas")
if err != nil {
l.Log().Errorln("No replica count specified")
l.Log().Fatalln(err)
log.Errorln("No replica count specified")
log.Fatalln(err)
}
// --role
roleStr, err := cmd.Flags().GetString("role")
if err != nil {
l.Log().Errorln("No node role specified")
l.Log().Fatalln(err)
log.Errorln("No node role specified")
log.Fatalln(err)
}
if _, ok := k3d.NodeRoles[roleStr]; !ok {
l.Log().Fatalf("Unknown node role '%s'\n", roleStr)
log.Fatalf("Unknown node role '%s'\n", roleStr)
}
role := k3d.NodeRoles[roleStr]
// --image
image, err := cmd.Flags().GetString("image")
if err != nil {
l.Log().Errorln("No image specified")
l.Log().Fatalln(err)
log.Errorln("No image specified")
log.Fatalln(err)
}
// --cluster
clusterName, err := cmd.Flags().GetString("cluster")
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
cluster := &k3d.Cluster{
Name: clusterName,
}
// --memory
memory, err := cmd.Flags().GetString("memory")
if err != nil {
l.Log().Errorln("No memory specified")
l.Log().Fatalln(err)
log.Errorln("No memory specified")
log.Fatalln(err)
}
if _, err := dockerunits.RAMInBytes(memory); memory != "" && err != nil {
l.Log().Errorf("Provided memory limit value is invalid")
log.Errorf("Provided memory limit value is invalid")
}
// --runtime-label
runtimeLabelsFlag, err := cmd.Flags().GetStringSlice("runtime-label")
if err != nil {
l.Log().Errorln("No runtime-label specified")
l.Log().Fatalln(err)
log.Errorln("No runtime-label specified")
log.Fatalln(err)
}
runtimeLabels := make(map[string]string, len(runtimeLabelsFlag)+1)
for _, label := range runtimeLabelsFlag {
labelSplitted := strings.Split(label, "=")
if len(labelSplitted) != 2 {
l.Log().Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
log.Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
}
cliutil.ValidateRuntimeLabelKey(labelSplitted[0])
runtimeLabels[labelSplitted[0]] = labelSplitted[1]
@ -161,25 +153,19 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, string)
// --k3s-node-label
k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label")
if err != nil {
l.Log().Errorln("No k3s-node-label specified")
l.Log().Fatalln(err)
log.Errorln("No k3s-node-label specified")
log.Fatalln(err)
}
k3sNodeLabels := make(map[string]string, len(k3sNodeLabelsFlag))
for _, label := range k3sNodeLabelsFlag {
labelSplitted := strings.Split(label, "=")
if len(labelSplitted) != 2 {
l.Log().Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
log.Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
}
k3sNodeLabels[labelSplitted[0]] = labelSplitted[1]
}
// --network
networks, err := cmd.Flags().GetStringSlice("network")
if err != nil {
l.Log().Fatalf("failed to get --network string slice flag: %v", err)
}
// generate list of nodes
nodes := []*k3d.Node{}
for i := 0; i < replicas; i++ {
@ -191,10 +177,9 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, string)
RuntimeLabels: runtimeLabels,
Restart: true,
Memory: memory,
Networks: networks,
}
nodes = append(nodes, node)
}
return nodes, clusterName
return nodes, cluster
}
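To illustrate the branch introduced above (all names, addresses and tokens are hypothetical): "k3d node create extra-agent --role agent --cluster mycluster" adds an agent to a cluster managed by the local runtime, while "k3d node create extra-agent --cluster https://my-k3s-host:6443 --token <cluster-token>" takes the https:// path and joins the node to a remote cluster, which is also why the --token override exists.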

View File

@ -22,11 +22,11 @@ THE SOFTWARE.
package node
import (
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -52,14 +52,14 @@ func NewCmdNodeDelete() *cobra.Command {
nodeDeleteOpts := k3d.NodeDeleteOpts{SkipLBUpdate: flags.All} // do not update LB, if we're deleting all nodes anyway
if len(nodes) == 0 {
l.Log().Infoln("No nodes found")
log.Infoln("No nodes found")
} else {
for _, node := range nodes {
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, nodeDeleteOpts); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
l.Log().Infof("Successfully deleted %d node(s)!", len(nodes))
log.Infof("Successfully deleted %d node(s)!", len(nodes))
}
},
}
@ -83,11 +83,11 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string, flags *nodeDeleteFlag
// --all
if flags.All {
if !flags.IncludeRegistries {
l.Log().Infoln("Didn't set '--registries', so won't delete registries.")
log.Infoln("Didn't set '--registries', so won't delete registries.")
}
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
include := k3d.ClusterInternalNodeRoles
exclude := []k3d.Role{}
@ -99,13 +99,13 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string, flags *nodeDeleteFlag
}
if !flags.All && len(args) < 1 {
l.Log().Fatalln("Expecting at least one node name if `--all` is not set")
log.Fatalln("Expecting at least one node name if `--all` is not set")
}
for _, name := range args {
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
nodes = append(nodes, node)
}

View File

@ -23,11 +23,11 @@ package node
import (
"github.com/docker/go-connections/nat"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -46,13 +46,13 @@ func NewCmdNodeEdit() *cobra.Command {
existingNode, changeset := parseEditNodeCmd(cmd, args)
l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)
log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)
if err := client.NodeEdit(cmd.Context(), runtimes.SelectedRuntime, existingNode, changeset); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
l.Log().Infof("Successfully updated %s", existingNode.Name)
log.Infof("Successfully updated %s", existingNode.Name)
},
}
@ -71,16 +71,16 @@ func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node)
existingNode, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: args[0]})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
if existingNode == nil {
l.Log().Infof("Node %s not found", args[0])
log.Infof("Node %s not found", args[0])
return nil, nil
}
if existingNode.Role != k3d.LoadBalancerRole {
l.Log().Fatalln("Currently only the loadbalancer can be updated!")
log.Fatalln("Currently only the loadbalancer can be updated!")
}
changeset := &k3d.Node{}
@ -90,7 +90,7 @@ func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node)
*/
portFlags, err := cmd.Flags().GetStringArray("port-add")
if err != nil {
l.Log().Errorln(err)
log.Errorln(err)
return nil, nil
}
@ -101,7 +101,7 @@ func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node)
portmappings, err := nat.ParsePortSpec(flag)
if err != nil {
l.Log().Fatalf("Failed to parse port spec '%s': %+v", flag, err)
log.Fatalf("Failed to parse port spec '%s': %+v", flag, err)
}
for _, pm := range portmappings {
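A hedged example of the port spec this loop consumes (node name and port values are invented): "k3d node edit k3d-mycluster-serverlb --port-add 8080:80" hands "8080:80" to nat.ParsePortSpec, i.e. host port 8080 forwarded to container port 80; the parser also accepts a host IP prefix such as "127.0.0.1:8080:80".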

View File

@ -26,12 +26,13 @@ import (
"strings"
"github.com/liggitt/tabwriter"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
type nodeListFlags struct {
@ -63,14 +64,14 @@ func NewCmdNodeList() *cobra.Command {
if len(nodes) == 0 { // Option a) no name specified -> get all nodes
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
existingNodes = append(existingNodes, found...)
} else { // Option b) cluster name specified -> get specific cluster
for _, node := range nodes {
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
existingNodes = append(existingNodes, found)
}

View File

@ -22,11 +22,12 @@ THE SOFTWARE.
package node
import (
"github.com/rancher/k3d/v5/cmd/util"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
// NewCmdNodeStart returns a new cobra command
@ -41,7 +42,7 @@ func NewCmdNodeStart() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
node := parseStartNodeCmd(cmd, args)
if err := runtimes.SelectedRuntime.StartNode(cmd.Context(), node); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
},
}
@ -54,7 +55,7 @@ func NewCmdNodeStart() *cobra.Command {
func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
l.Log().Fatalln("No node name given")
log.Fatalln("No node name given")
}
return &k3d.Node{Name: args[0]}

View File

@ -22,12 +22,13 @@ THE SOFTWARE.
package node
import (
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/spf13/cobra"
l "github.com/rancher/k3d/v5/pkg/logger"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
// NewCmdNodeStop returns a new cobra command
@ -42,7 +43,7 @@ func NewCmdNodeStop() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
node := parseStopNodeCmd(cmd, args)
if err := runtimes.SelectedRuntime.StopNode(cmd.Context(), node); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
},
}
@ -55,7 +56,7 @@ func NewCmdNodeStop() *cobra.Command {
func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
if len(args) == 0 || len(args[0]) == 0 {
l.Log().Fatalln("No node name given")
log.Fatalln("No node name given")
}
return &k3d.Node{Name: args[0]}

View File

@ -22,7 +22,7 @@ THE SOFTWARE.
package registry
import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -37,18 +37,18 @@ func NewCmdRegistry() *cobra.Command {
Long: `Manage registry/registries`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
// add subcommands
cmd.AddCommand(NewCmdRegistryCreate(),
NewCmdRegistryStart(),
NewCmdRegistryStop(),
NewCmdRegistryDelete(),
NewCmdRegistryList())
cmd.AddCommand(NewCmdRegistryCreate())
cmd.AddCommand(NewCmdRegistryStart())
cmd.AddCommand(NewCmdRegistryStop())
cmd.AddCommand(NewCmdRegistryDelete())
cmd.AddCommand(NewCmdRegistryList())
// add flags

View File

@ -24,14 +24,14 @@ package registry
import (
"fmt"
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v5/pkg/client"
"github.com/rancher/k3d/v4/pkg/client"
cliutil "github.com/rancher/k3d/v5/cmd/util"
cliutil "github.com/rancher/k3d/v4/cmd/util"
"github.com/spf13/cobra"
)
@ -75,12 +75,12 @@ func NewCmdRegistryCreate() *cobra.Command {
reg, clusters := parseCreateRegistryCmd(cmd, args, flags, ppFlags)
regNode, err := client.RegistryRun(cmd.Context(), runtimes.SelectedRuntime, reg)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
if err := client.RegistryConnectClusters(cmd.Context(), runtimes.SelectedRuntime, regNode, clusters); err != nil {
l.Log().Errorln(err)
log.Errorln(err)
}
l.Log().Infof("Successfully created registry '%s'", reg.Host)
log.Infof("Successfully created registry '%s'", reg.Host)
regString := fmt.Sprintf("%s:%s", reg.Host, reg.ExposureOpts.Binding.HostPort)
if !flags.NoHelp {
fmt.Println(fmt.Sprintf(helptext, regString, regString, regString, regString))
@ -93,10 +93,10 @@ func NewCmdRegistryCreate() *cobra.Command {
// TODO: connecting to clusters requires non-existing config reload functionality in containerd
cmd.Flags().StringArrayVarP(&ppFlags.Clusters, "cluster", "c", nil, "[NotReady] Select the cluster(s) that the registry shall connect to.")
if err := cmd.RegisterFlagCompletionFunc("cluster", cliutil.ValidArgsAvailableClusters); err != nil {
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}
if err := cmd.Flags().MarkHidden("cluster"); err != nil {
l.Log().Fatalln("Failed to hide --cluster flag on registry create command")
log.Fatalln("Failed to hide --cluster flag on registry create command")
}
cmd.Flags().StringVarP(&flags.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultRegistryImageRepo, k3d.DefaultRegistryImageTag), "Specify image used for the registry")
@ -125,8 +125,8 @@ func parseCreateRegistryCmd(cmd *cobra.Command, args []string, flags *regCreateF
// --port
exposePort, err := cliutil.ParsePortExposureSpec(ppFlags.Port, k3d.DefaultRegistryPort)
if err != nil {
l.Log().Errorln("Failed to parse registry port")
l.Log().Fatalln(err)
log.Errorln("Failed to parse registry port")
log.Fatalln(err)
}
// set the name for the registry node

View File

@ -22,11 +22,11 @@ THE SOFTWARE.
package registry
import (
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -51,11 +51,11 @@ func NewCmdRegistryDelete() *cobra.Command {
nodes := parseRegistryDeleteCmd(cmd, args, &flags)
if len(nodes) == 0 {
l.Log().Infoln("No registries found")
log.Infoln("No registries found")
} else {
for _, node := range nodes {
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
}
@ -80,18 +80,18 @@ func parseRegistryDeleteCmd(cmd *cobra.Command, args []string, flags *registryDe
if flags.All {
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
if !flags.All && len(args) < 1 {
l.Log().Fatalln("Expecting at least one registry name if `--all` is not set")
log.Fatalln("Expecting at least one registry name if `--all` is not set")
}
for _, name := range args {
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
nodes = append(nodes, node)
}

View File

@ -26,11 +26,11 @@ import (
"strings"
"github.com/liggitt/tabwriter"
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -64,15 +64,15 @@ func NewCmdRegistryList() *cobra.Command {
if len(nodes) == 0 { // Option a) no name specified -> get all registries
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
existingNodes = append(existingNodes, found...)
} else { // Option b) registry name(s) specified -> get specific registries
for _, node := range nodes {
l.Log().Tracef("Node %s", node.Name)
log.Tracef("Node %s", node.Name)
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
existingNodes = append(existingNodes, found)
}

View File

@ -25,24 +25,24 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/rancher/k3d/v5/cmd/cluster"
cfg "github.com/rancher/k3d/v5/cmd/config"
"github.com/rancher/k3d/v5/cmd/debug"
"github.com/rancher/k3d/v5/cmd/image"
"github.com/rancher/k3d/v5/cmd/kubeconfig"
"github.com/rancher/k3d/v5/cmd/node"
"github.com/rancher/k3d/v5/cmd/registry"
cliutil "github.com/rancher/k3d/v5/cmd/util"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/version"
"github.com/sirupsen/logrus"
"github.com/rancher/k3d/v4/cmd/cluster"
cfg "github.com/rancher/k3d/v4/cmd/config"
"github.com/rancher/k3d/v4/cmd/debug"
"github.com/rancher/k3d/v4/cmd/image"
"github.com/rancher/k3d/v4/cmd/kubeconfig"
"github.com/rancher/k3d/v4/cmd/node"
"github.com/rancher/k3d/v4/cmd/registry"
cliutil "github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/writer"
)
@ -71,7 +71,7 @@ All Nodes of a k3d cluster are part of the same docker network.`,
printVersion()
} else {
if err := cmd.Usage(); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
},
@ -85,38 +85,40 @@ All Nodes of a k3d cluster are part of the same docker network.`,
rootCmd.Flags().BoolVar(&flags.version, "version", false, "Show k3d and default k3s version")
// add subcommands
rootCmd.AddCommand(NewCmdCompletion(rootCmd),
cluster.NewCmdCluster(),
kubeconfig.NewCmdKubeconfig(),
node.NewCmdNode(),
image.NewCmdImage(),
cfg.NewCmdConfig(),
registry.NewCmdRegistry(),
debug.NewCmdDebug(),
&cobra.Command{
Use: "version",
Short: "Show k3d and default k3s version",
Long: "Show k3d and default k3s version",
Run: func(cmd *cobra.Command, args []string) {
printVersion()
},
rootCmd.AddCommand(NewCmdCompletion(rootCmd))
rootCmd.AddCommand(cluster.NewCmdCluster())
rootCmd.AddCommand(kubeconfig.NewCmdKubeconfig())
rootCmd.AddCommand(node.NewCmdNode())
rootCmd.AddCommand(image.NewCmdImage())
rootCmd.AddCommand(cfg.NewCmdConfig())
rootCmd.AddCommand(registry.NewCmdRegistry())
rootCmd.AddCommand(debug.NewCmdDebug())
rootCmd.AddCommand(&cobra.Command{
Use: "version",
Short: "Show k3d and default k3s version",
Long: "Show k3d and default k3s version",
Run: func(cmd *cobra.Command, args []string) {
printVersion()
},
&cobra.Command{
Use: "runtime-info",
Short: "Show runtime information",
Long: "Show some information about the runtime environment (e.g. docker info)",
Run: func(cmd *cobra.Command, args []string) {
info, err := runtimes.SelectedRuntime.Info()
if err != nil {
l.Log().Fatalln(err)
}
err = yaml.NewEncoder(os.Stdout).Encode(info)
if err != nil {
l.Log().Fatalln(err)
}
},
Hidden: true,
})
})
rootCmd.AddCommand(&cobra.Command{
Use: "runtime-info",
Short: "Show runtime information",
Long: "Show some information about the runtime environment (e.g. docker info)",
Run: func(cmd *cobra.Command, args []string) {
info, err := runtimes.SelectedRuntime.Info()
if err != nil {
log.Fatalln(err)
}
err = yaml.NewEncoder(os.Stdout).Encode(info)
if err != nil {
log.Fatalln(err)
}
},
Hidden: true,
})
// Init
cobra.OnInitialize(initLogging, initRuntime)
@ -134,58 +136,58 @@ func Execute() {
if _, _, err := cmd.Find(parts); err != nil {
pluginFound, err := cliutil.HandlePlugin(context.Background(), parts)
if err != nil {
l.Log().Errorf("Failed to execute plugin '%+v'", parts)
l.Log().Fatalln(err)
log.Errorf("Failed to execute plugin '%+v'", parts)
log.Fatalln(err)
} else if pluginFound {
os.Exit(0)
}
}
}
if err := cmd.Execute(); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}
// initLogging initializes the logger
func initLogging() {
if flags.traceLogging {
l.Log().SetLevel(logrus.TraceLevel)
log.SetLevel(log.TraceLevel)
} else if flags.debugLogging {
l.Log().SetLevel(logrus.DebugLevel)
log.SetLevel(log.DebugLevel)
} else {
switch logLevel := strings.ToUpper(os.Getenv("LOG_LEVEL")); logLevel {
case "TRACE":
l.Log().SetLevel(logrus.TraceLevel)
log.SetLevel(log.TraceLevel)
case "DEBUG":
l.Log().SetLevel(logrus.DebugLevel)
log.SetLevel(log.DebugLevel)
case "WARN":
l.Log().SetLevel(logrus.WarnLevel)
log.SetLevel(log.WarnLevel)
case "ERROR":
l.Log().SetLevel(logrus.ErrorLevel)
log.SetLevel(log.ErrorLevel)
default:
l.Log().SetLevel(logrus.InfoLevel)
log.SetLevel(log.InfoLevel)
}
}
l.Log().SetOutput(io.Discard)
l.Log().AddHook(&writer.Hook{
log.SetOutput(ioutil.Discard)
log.AddHook(&writer.Hook{
Writer: os.Stderr,
LogLevels: []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
LogLevels: []log.Level{
log.PanicLevel,
log.FatalLevel,
log.ErrorLevel,
log.WarnLevel,
},
})
l.Log().AddHook(&writer.Hook{
log.AddHook(&writer.Hook{
Writer: os.Stdout,
LogLevels: []logrus.Level{
logrus.InfoLevel,
logrus.DebugLevel,
logrus.TraceLevel,
LogLevels: []log.Level{
log.InfoLevel,
log.DebugLevel,
log.TraceLevel,
},
})
formatter := &logrus.TextFormatter{
formatter := &log.TextFormatter{
ForceColors: true,
}
@ -193,18 +195,18 @@ func initLogging() {
formatter.FullTimestamp = true
}
l.Log().SetFormatter(formatter)
log.SetFormatter(formatter)
}
func initRuntime() {
runtime, err := runtimes.GetRuntime("docker")
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
runtimes.SelectedRuntime = runtime
if rtinfo, err := runtime.Info(); err == nil {
l.Log().Debugf("Runtime Info:\n%+v", rtinfo)
log.Debugf("Runtime Info:\n%+v", rtinfo)
}
}
@ -284,11 +286,11 @@ PowerShell:
Run: func(cmd *cobra.Command, args []string) {
if completionFunc, ok := completionFunctions[args[0]]; ok {
if err := completionFunc(os.Stdout); err != nil {
l.Log().Fatalf("Failed to generate completion script for shell '%s'", args[0])
log.Fatalf("Failed to generate completion script for shell '%s'", args[0])
}
return
}
l.Log().Fatalf("Shell '%s' not supported for completion", args[0])
log.Fatalf("Shell '%s' not supported for completion", args[0])
},
}
return cmd
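For readers unfamiliar with the l.Log() calls appearing throughout this comparison: pkg/logger itself is not shown in the diff, so the following is only a guessed minimal sketch of what such a wrapper plausibly looks like, namely a package-level logrus instance exposed through Log():

package logger

import "github.com/sirupsen/logrus"

var k3dLogger = logrus.New()

// Log returns the package-wide logger instance that call sites use as l.Log().
func Log() *logrus.Logger {
	return k3dLogger
}

Every method used above (SetLevel, SetOutput, AddHook, SetFormatter, Infof, Fatalln, ...) is then simply a *logrus.Logger method.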

View File

@ -25,10 +25,10 @@ import (
"context"
"strings"
k3dcluster "github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3dcluster "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -39,7 +39,7 @@ func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete st
var clusters []*k3d.Cluster
clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Errorln("Failed to get list of clusters for shell completion")
log.Errorln("Failed to get list of clusters for shell completion")
return nil, cobra.ShellCompDirectiveError
}
@ -64,7 +64,7 @@ func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete strin
var nodes []*k3d.Node
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Errorln("Failed to get list of nodes for shell completion")
log.Errorln("Failed to get list of nodes for shell completion")
return nil, cobra.ShellCompDirectiveError
}
@ -89,7 +89,7 @@ func ValidArgsAvailableRegistries(cmd *cobra.Command, args []string, toComplete
var nodes []*k3d.Node
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Errorln("Failed to get list of nodes for shell completion")
log.Errorln("Failed to get list of nodes for shell completion")
return nil, cobra.ShellCompDirectiveError
}

View File

@ -1,97 +0,0 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/rancher/k3d/v5/pkg/config"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)
func InitViperWithConfigFile(cfgViper *viper.Viper, configFile string) error {
// viper for the general config (file, env and non pre-processed flags)
cfgViper.SetEnvPrefix("K3D")
cfgViper.AutomaticEnv()
cfgViper.SetConfigType("yaml")
// Set config file, if specified
if configFile != "" {
if _, err := os.Stat(configFile); err != nil {
l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
}
// create temporary file to expand environment variables in the config without writing that back to the original file
// we're doing it here, because this happens just before absolutely all other processing
tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile)))
if err != nil {
l.Log().Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
}
defer tmpfile.Close()
originalcontent, err := os.ReadFile(configFile)
if err != nil {
l.Log().Fatalf("error reading config file %s: %v", configFile, err)
}
expandedcontent := os.ExpandEnv(string(originalcontent))
if _, err := tmpfile.WriteString(expandedcontent); err != nil {
l.Log().Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
}
// use temp file with expanded variables
cfgViper.SetConfigFile(tmpfile.Name())
// try to read config into memory (viper map structure)
if err := cfgViper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
}
// config file found but some other error happened
l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
}
schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
if err != nil {
l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
}
if err := config.ValidateSchemaFile(tmpfile.Name(), schema); err != nil {
l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
}
l.Log().Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
}
if l.Log().GetLevel() >= logrus.DebugLevel {
c, _ := yaml.Marshal(cfgViper.AllSettings())
l.Log().Debugf("Configuration:\n%s", c)
}
return nil
}

View File

@ -25,7 +25,7 @@ import (
"fmt"
"strings"
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
)
// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
@ -50,10 +50,10 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
// Case 1.1: Escaped backslash
if strings.HasSuffix(it, "\\\\") {
it = strings.TrimSuffix(it, "\\")
l.Log().Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
log.Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
} else {
// Case 1.2: Unescaped backslash -> Escaping the '@' -> remove suffix and append it to buffer, followed by the escaped @ sign
l.Log().Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
log.Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
buffer += strings.TrimSuffix(it, "\\") + "@"
continue
}

View File

@ -29,8 +29,8 @@ import (
"strings"
"github.com/liggitt/tabwriter"
l "github.com/rancher/k3d/v5/pkg/logger"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
@ -55,7 +55,7 @@ func PrintNodes(nodes []*k3d.Node, outputFormat string, headers *[]string, nodeP
if headers != nil {
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(*headers, "\t"))
if err != nil {
l.Log().Fatalln("Failed to print headers")
log.Fatalln("Failed to print headers")
}
}
}

View File

@ -28,7 +28,7 @@ import (
"os/exec"
"strings"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// HandlePlugin takes care of finding and executing a plugin based on the longest prefix

View File

@ -28,9 +28,9 @@ import (
"strconv"
"github.com/docker/go-connections/nat"
l "github.com/rancher/k3d/v5/pkg/logger"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/util"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
)
var apiPortRegexp = regexp.MustCompile(`^(?P<hostref>(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|(?P<hostname>\S+):)?(?P<port>(\d{1,5}|random))$`)
@ -55,7 +55,7 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO
// check if there's a host reference
if submatches["hostname"] != "" {
l.Log().Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
log.Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
addrs, err := net.LookupHost(submatches["hostname"])
if err != nil {
return nil, fmt.Errorf("Failed to lookup host '%s' specified for Port Exposure: %+v", submatches["hostname"], err)
@ -77,15 +77,15 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO
// port: get a free one if there's none defined or set to random
if submatches["port"] == "" || submatches["port"] == "random" {
l.Log().Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
log.Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
freePort, err := GetFreePort()
if err != nil || freePort == 0 {
l.Log().Warnf("Failed to get random free port: %+v", err)
l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
log.Warnf("Failed to get random free port: %+v", err)
log.Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
submatches["port"] = internalPort
} else {
submatches["port"] = strconv.Itoa(freePort)
l.Log().Debugf("Got free port for Port Exposure: '%d'", freePort)
log.Debugf("Got free port for Port Exposure: '%d'", freePort)
}
}
@ -93,7 +93,7 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO
portMapping, err := nat.ParsePortSpec(realPortString)
if err != nil {
return nil, fmt.Errorf("failed to parse port spec for Port Exposure '%s': %+v", realPortString, err)
return nil, fmt.Errorf("Failed to parse port spec for Port Exposure '%s': %+v", realPortString, err)
}
api.Port = portMapping[0].Port // there can be only one due to our regexp
@ -112,12 +112,14 @@ func ValidatePortMap(portmap string) (string, error) {
func GetFreePort() (int, error) {
tcpAddress, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, fmt.Errorf("failed to resolve address 'localhost:0': %w", err)
log.Errorln("Failed to resolve address")
return 0, err
}
tcpListener, err := net.ListenTCP("tcp", tcpAddress)
if err != nil {
return 0, fmt.Errorf("failed to create tcp listener: %w", err)
log.Errorln("Failed to create TCP Listener")
return 0, err
}
defer tcpListener.Close()
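An aside on the style change visible in GetFreePort above, where logging plus a bare return was replaced by fmt.Errorf with the %w verb: wrapping keeps the original error inspectable by callers. A small self-contained illustration (not code from this repository):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Stat("/definitely/not/a/real/path")
	wrapped := fmt.Errorf("failed to stat example path: %w", err)
	// Because of %w the underlying error is still reachable:
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
}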

View File

@ -24,12 +24,12 @@ package util
import (
"strings"
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
)
// validateRuntimeLabelKey validates a given label key is not reserved for internal k3d usage
func ValidateRuntimeLabelKey(labelKey string) {
if strings.HasPrefix(labelKey, "k3s.") || strings.HasPrefix(labelKey, "k3d.") || labelKey == "app" {
l.Log().Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
log.Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
}
}

View File

@ -27,9 +27,9 @@ import (
rt "runtime"
"strings"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes"
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
)
// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
@ -81,7 +81,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
}
if !isNamedVolume {
if _, err := os.Stat(src); err != nil {
l.Log().Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
log.Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
}
}
}
@ -98,7 +98,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
volumeName, err := runtime.GetVolume(volumeName)
if err != nil {
return fmt.Errorf("Failed to verify named volume: %w", err)
return err
}
if volumeName == "" {
return fmt.Errorf("Failed to find named volume '%s'", volumeName)

View File

@ -1,4 +1,4 @@
image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}-dind
image: rancher/k3d:{{#if build.tag}}{{build.tag}}{{else}}latest{{/if}}dind
{{#if build.tags}}
tags:
{{#each build.tags}}
@ -6,21 +6,21 @@ tags:
{{/each}}
{{/if}}
manifests:
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-amd64
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}dind-linux-amd64
platform:
architecture: amd64
os: linux
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm64
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}dind-linux-arm64
platform:
variant: v8
architecture: arm64
os: linux
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}dind-linux-arm
platform:
variant: v7
architecture: arm
os: linux
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}dind-linux-arm
platform:
variant: v6
architecture: arm
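To make the two template variants above concrete, assume a hypothetical build.tag of v5.0.0 (invented for this example): the variant using trimPrefix "v" renders rancher/k3d:5.0.0-dind for the manifest image and rancher/k3d:5.0.0-dind-linux-amd64 for the per-arch entries, whereas the variant interpolating build.tag directly renders rancher/k3d:v5.0.0dind (note the missing separator before "dind") and rancher/k3d:v5.0.0-dind-linux-amd64.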

View File

@ -3,11 +3,20 @@ module github.com/rancher/k3d/docgen
go 1.16
require (
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb // indirect
github.com/containerd/containerd v1.5.0-rc.1 // indirect
github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/rancher/k3d/v5 v5.0.0-00010101000000-000000000000
github.com/rancher/k3d/v4 v4.4.7-0.20210709062205-c5f7884f7870
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.2.1
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 // indirect
golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 // indirect
golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 // indirect
k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.1 // indirect
)
replace github.com/rancher/k3d/v5 => /PATH/TO/YOUR/REPO/DIRECTORY
replace github.com/rancher/k3d/v4 => /PATH/TO/YOUR/REPO/DIRECTORY

View File

@ -44,15 +44,13 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@ -71,9 +69,8 @@ github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEY
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.18 h1:cYnKADiM1869gvBpos3YCteeT6sZLB48lB5dmMMs8Tg=
github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@ -145,18 +142,16 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb h1:cq9suWES/pQHVg1N4u8ltT30HWScFmcAz4sB/wJyp/I=
github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb/go.mod h1:sgGgnAnNasYdJ1ypnikP2SO7SM0Lfgkgwk3TUc9bDO4=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@ -170,41 +165,37 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.5 h1:q1gxsZsGZ8ddVe98yO6pR21b5xQSMiR61lD0W96pgQo=
github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo=
github.com/containerd/containerd v1.5.0-rc.1 h1:7n80DQm69wXXjLGQo8sytMPC9Z+kG6B4s95hfbFLiXQ=
github.com/containerd/containerd v1.5.0-rc.1/go.mod h1:kAwhYasTYKvQWPnWf8CoRDu3vikb17YocPLvHMQhBn4=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5 h1:k6Dn7shF+i1q4utvCyW4+o9REsCMAeRyORM5IhXMCnw=
github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/imgcrypt v1.1.1-0.20210412181126-0bed51b9522c/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@ -213,12 +204,10 @@ github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8h
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
@ -254,7 +243,6 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -264,17 +252,17 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v20.10.8+incompatible h1:/zO/6y9IOpcehE49yMRTV9ea0nBpb8OeqSskXLNfH1E=
github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.7+incompatible h1:pv/3NqibQKphWZiAskMzdz8w0PRbtTaEB+f6NwdU7Is=
github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@ -307,11 +295,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
@ -433,7 +419,6 @@ github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunE
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -470,12 +455,9 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@ -624,7 +606,6 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
@ -665,14 +646,12 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@ -825,7 +804,6 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y
github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -1005,7 +983,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@ -1017,11 +994,12 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 h1:4qWs8cYYH6PoEFy4dfhDFgoMGkwAcETd+MmPdCPMzUc=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1099,7 +1077,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1121,16 +1098,15 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -1153,8 +1129,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1287,7 +1263,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@ -1376,7 +1351,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
@ -1391,44 +1365,38 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
inet.af/netaddr v0.0.0-20210903134321-85fa6c94624e h1:tvgqez5ZQoBBiBAGNU/fmJy247yB/7++kcLOEoMYup0=
inet.af/netaddr v0.0.0-20210903134321-85fa6c94624e/go.mod h1:z0nx+Dh+7N7CC8V5ayHtHGpZpxLQZZxkIaaz6HN65Ls=
inet.af/netaddr v0.0.0-20210421205553-78c777480f22 h1:TX8hopxzHycFVkIsvu6DSpCWUCqDqOvyyPj/5IK1fUQ=
inet.af/netaddr v0.0.0-20210421205553-78c777480f22/go.mod h1:z0nx+Dh+7N7CC8V5ayHtHGpZpxLQZZxkIaaz6HN65Ls=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y=
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA=
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag=
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 h1:u5rPykqiCpL+LBfjRkXvnK71gOgIdmq3eHUEkPrbeTI=
k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
@ -1436,11 +1404,10 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.1 h1:nYqY2A6oy37sKLYuSBXuQhbj4JVclzJK13BOIvJG5XU=
sigs.k8s.io/structured-merge-diff/v4 v4.1.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

View File

@ -1,8 +1,9 @@
package main
import (
"github.com/rancher/k3d/v5/cmd"
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/rancher/k3d/v4/cmd"
"github.com/spf13/cobra/doc"
)
@ -11,6 +12,6 @@ func main() {
k3d.DisableAutoGenTag = true
if err := doc.GenMarkdownTree(k3d, "../docs/usage/commands"); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
}

View File

@ -16,7 +16,3 @@ go mod tidy
go mod vendor
go run ./main.go
sed -i "s%$REPO_DIR%$REPLACE_PLACEHOLDER%" "$CURR_DIR/go.mod"
rm -r "$CURR_DIR/vendor"

View File

@ -1,6 +1,6 @@
nav:
- index.md
- usage
- design
- internals
- faq
collapse: false
collapse: false

View File

@ -1,60 +0,0 @@
# Defaults
## k3d reserved settings
When you create a K3s cluster in Docker using k3d, we make use of some K3s configuration options, making them "reserved" for k3d.
This means that overriding those options with your own may break the cluster setup.
### Environment Variables
The following K3s environment variables are used to configure the cluster:
| Variable | K3d Default | Configurable? |
|----------|-------------|---------------|
| `K3S_URL`| `https://$CLUSTERNAME-server-0:6443` | no |
| `K3S_TOKEN`| random | yes (`--token`) |
| `K3S_KUBECONFIG_OUTPUT`| `/output/kubeconfig.yaml` | no |
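Of these, only the token is meant to be overridden; a minimal sketch (the cluster name and token value are placeholders):
```bash
# Set the cluster token explicitly instead of letting k3d generate a random one
k3d cluster create mycluster --token mySuperSecretToken
```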
## k3d Loadbalancer
By default, k3d creates an Nginx loadbalancer alongside the clusters it creates to handle the port-forwarding.
The loadbalancer can partly be configured using k3d-defined settings.
| Nginx setting | k3d default | k3d setting |
|-------------|-------------|-------------|
| `proxy_timeout` (default for all server stanzas) | `600` (s) | `settings.defaultProxyTimeout` |
| `worker_connections` | `1024` | `settings.workerConnections` |
### Overrides
- Example via CLI: `k3d cluster create --lb-config-override settings.defaultProxyTimeout=900`
- Example via Config File:
```yaml
# ... truncated ...
k3d:
loadbalancer:
configOverrides:
- settings.workerConnections=2048
```
## Multiple server nodes
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
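A minimal sketch of a creation command that triggers this behaviour (the cluster name is a placeholder; `--servers` is the multi-server flag in recent k3d releases):
```bash
# server-0 runs with --cluster-init; the other servers join it via --server https://<init-node>:6443
k3d cluster create multiserver --servers 3
```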
## API-Ports
- by default, we expose the API-Port (`6443`) by forwarding traffic from the default server loadbalancer (nginx container) to the server node(s)
- port `6443` of the loadbalancer is then mapped to a specific (`--api-port` flag) or a random (default) port on the host system
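For example, to pin the API port to a fixed address on the host instead of a random one (address and port are just examples):
```bash
# Expose the Kubernetes API on 127.0.0.1:6550 via the server loadbalancer
k3d cluster create --api-port 127.0.0.1:6550
```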
## Kubeconfig
- if `--kubeconfig-update-default` is set, we use the default loading rules to get the default kubeconfig:
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
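If you disable the automatic update, you can still fetch the cluster's kubeconfig explicitly afterwards; a sketch (cluster name and output path are placeholders):
```bash
k3d cluster create mycluster --kubeconfig-update-default=false
# Print the cluster's kubeconfig and store it wherever you like
k3d kubeconfig get mycluster > /tmp/mycluster-kubeconfig.yaml
```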
## Networking
- [by default, k3d creates a new (docker) network for every cluster](./networking)

View File

@ -1,4 +1,3 @@
title: FAQ
nav:
- faq.md
collapse: true
- faq.md

View File

@ -1,4 +1,4 @@
# FAQ
# FAQ / Nice to know
## Issues with BTRFS
@ -28,8 +28,8 @@
```bash
k3d cluster create \
--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
--k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*'
--k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' \
--k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'
```
## Restarting a multi-server cluster or the initializing server node fails
@ -44,7 +44,7 @@
- The Problem: Passing a feature flag to the Kubernetes API Server running inside k3s.
- Example: you want to enable the EphemeralContainers feature flag in Kubernetes
- Solution: `#!bash k3d cluster create --k3s-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=true@server:*'`
- Solution: `#!bash k3d cluster create --k3s-server-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=true'`
- **Note**: Be aware of where the flags require dashes (`--`) and where they don't.
- the k3s flag (`--kube-apiserver-arg`) has the dashes
- the kube-apiserver flag `feature-gates` doesn't have them (k3s adds them internally)
@ -53,10 +53,10 @@
```bash
k3d cluster create k3d-one \
--k3s-arg "--cluster-cidr=10.118.0.0/17@server:*" \
--k3s-arg "--service-cidr=10.118.128.0/17@server:*" \
--k3s-arg "--disable=servicelb@server:*" \
--k3s-arg "--disable=traefik@server:*" \
--k3s-server-arg --cluster-cidr="10.118.0.0/17" \
--k3s-server-arg --service-cidr="10.118.128.0/17" \
--k3s-server-arg --disable=servicelb \
--k3s-server-arg --disable=traefik \
--verbose
```
@ -105,8 +105,8 @@ Some can be fixed by passing the `HTTP_PROXY` environment variables to k3d, some
```bash
k3d cluster create \
--k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@server:*" \
--k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@agent:*" \
--k3s-server-arg "--kube-proxy-arg=conntrack-max-per-core=0" \
--k3s-agent-arg "--kube-proxy-arg=conntrack-max-per-core=0" \
--image rancher/k3s:v1.20.6-k3s
```

View File

@ -2,16 +2,14 @@
![k3d](static/img/k3d_logo_black_blue.svg)
## What is k3d?
**This page is targeting k3d v4.0.0 and newer!**
k3d is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s) (Rancher Lab's minimal Kubernetes distribution) in docker.
k3d makes it very easy to create single- and multi-node [k3s](https://github.com/rancher/k3s) clusters in docker, e.g. for local development on Kubernetes.
**Note:** k3d is a **community-driven project** that is supported by Rancher (SUSE), but it is not an official Rancher (SUSE) project.
??? Tip "View a quick demo"
<asciinema-player src="/static/asciicast/20210917_k3d_v5.0.0_01.cast" cols=200 rows=32></asciinema-player>
<asciinema-player src="/static/asciicast/20200715_k3d.01.cast" cols=200 rows=32></asciinema-player>
## Learning
@ -27,8 +25,7 @@ k3d makes it very easy to create single- and multi-node [k3s](https://github.com
## Requirements
- [**docker**](https://docs.docker.com/install/) to be able to use k3d at all
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the Kubernetes cluster
- [docker](https://docs.docker.com/install/)
## Releases
@ -54,8 +51,8 @@ You have several options there:
Use the install script to grab a specific release (via `TAG` environment variable):
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
### Other Installers
@ -81,7 +78,7 @@ Use the install script to grab a specific release (via `TAG` environment variabl
- [asdf](https://asdf-vm.com): `asdf plugin-add k3d && asdf install k3d latest`
*Note*: `asdf plugin-add k3d`, then `asdf install k3d <tag>` with `<tag> = latest` or `5.x.x` for a specific version (maintained by [spencergilbert/asdf-k3d](https://github.com/spencergilbert/asdf-k3d))
*Note*: `asdf plugin-add k3d`, then `asdf install k3d <tag>` with `<tag> = latest` or `4.x.x` for a specific version (maintained by [spencergilbert/asdf-k3d](https://github.com/spencergilbert/asdf-k3d))
- Others
- install via go: `#!bash go install github.com/rancher/k3d@latest` (**Note**: this will give you unreleased/bleeding-edge changes)

View File

@ -1,5 +1,5 @@
title: Design
title: Internals
nav:
- project.md
- defaults.md
- project.md
- networking.md

View File

@ -0,0 +1,22 @@
# Defaults
## Multiple server nodes
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
## API-Ports
- by default, we expose the API-Port (`6443`) by forwarding traffic from the default server loadbalancer (nginx container) to the server node(s)
- port `6443` of the loadbalancer is then mapped to a specific (`--api-port` flag) or a random (default) port on the host system
## Kubeconfig
- if `--kubeconfig-update-default` is set, we use the default loading rules to get the default kubeconfig:
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
## Networking
- [by default, k3d creates a new (docker) network for every cluster](./networking)

View File

@ -1,8 +1,7 @@
mkdocs==1.2.2
mkdocs-material==7.2.6
pymdown-extensions==8.2
mkdocs-git-revision-date-localized-plugin==0.9.3
mkdocs-awesome-pages-plugin==2.5.0
mdx_truly_sane_lists==1.2 # https://github.com/radude/mdx_truly_sane_lists
mkdocs-include-markdown-plugin==3.2.2 # https://github.com/mondeja/mkdocs-include-markdown-plugin
mike==1.1.0 # versioned docs: https://github.com/jimporter/mike
mkdocs
mkdocs-material
pymdown-extensions
mkdocs-git-revision-date-localized-plugin
mkdocs-awesome-pages-plugin
mdx_truly_sane_lists
mkdocs-include-markdown-plugin # https://github.com/mondeja/mkdocs-include-markdown-plugin

View File

@ -1,162 +0,0 @@
{"version": 2, "width": 213, "height": 45, "timestamp": 1631908903, "env": {"SHELL": "bash", "TERM": "xterm-256color"}}
[0.018381, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[1.636481, "o", "k"]
[1.702291, "o", "3"]
[1.835268, "o", "d"]
[2.024007, "o", " "]
[2.111734, "o", "v"]
[2.210891, "o", "e"]
[2.343441, "o", "r"]
[2.516933, "o", "s"]
[2.583471, "o", "i"]
[2.773563, "o", "o"]
[2.927568, "o", "n"]
[3.159219, "o", "\r\n\u001b[?2004l\r"]
[3.179508, "o", "k3d version v5.0.0\r\nk3s version v1.21.4-k3s1 (default)\r\n"]
[3.180754, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[4.57973, "o", "k"]
[4.656235, "o", "3"]
[4.763252, "o", "d"]
[4.865396, "o", " "]
[4.986278, "o", "c"]
[5.051494, "o", "l"]
[5.238737, "o", "u"]
[5.292747, "o", "s"]
[5.381595, "o", "t"]
[5.503508, "o", "e"]
[5.578881, "o", "r"]
[5.666704, "o", " "]
[5.766742, "o", "c"]
[5.962787, "o", "r"]
[6.029469, "o", "e"]
[6.061464, "o", "a"]
[6.184275, "o", "t"]
[6.281805, "o", "e"]
[6.445508, "o", " "]
[6.666863, "o", "-"]
[7.20248, "o", "-"]
[7.334019, "o", "a"]
[7.490134, "o", "g"]
[7.566087, "o", "e"]
[7.631634, "o", "n"]
[7.729597, "o", "t"]
[7.897099, "o", "s"]
[8.049496, "o", " "]
[8.280178, "o", "3"]
[8.499599, "o", " "]
[8.631147, "o", "d"]
[8.707104, "o", "e"]
[8.773508, "o", "m"]
[8.91407, "o", "o"]
[9.113612, "o", "\r\n\u001b[?2004l\r"]
[9.132118, "o", "\u001b[36mINFO\u001b[0m[0000] Prep: Network \r\n"]
[9.183203, "o", "\u001b[36mINFO\u001b[0m[0000] Created network 'k3d-demo' \r\n"]
[9.187229, "o", "\u001b[36mINFO\u001b[0m[0000] Created volume 'k3d-demo-images' \r\n"]
[10.187972, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-server-0' \r\n"]
[10.281058, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-0' \r\n"]
[10.368708, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-1' \r\n"]
[10.455282, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-2' \r\n"]
[10.536337, "o", "\u001b[36mINFO\u001b[0m[0001] Creating LoadBalancer 'k3d-demo-serverlb' \r\n"]
[10.609539, "o", "\u001b[36mINFO\u001b[0m[0001] Using the k3d-tools node to gather environment information \r\n"]
[10.628592, "o", "\u001b[36mINFO\u001b[0m[0001] Starting new tools node... \r\n"]
[10.702678, "o", "\u001b[36mINFO\u001b[0m[0001] Starting Node 'k3d-demo-tools' \r\n"]
[11.394216, "o", "\u001b[36mINFO\u001b[0m[0002] Deleted k3d-demo-tools \r\n"]
[11.394427, "o", "\u001b[36mINFO\u001b[0m[0002] Starting cluster 'demo' \r\n\u001b[36mINFO\u001b[0m[0002] Starting servers... \r\n"]
[11.404635, "o", "\u001b[36mINFO\u001b[0m[0002] Starting Node 'k3d-demo-server-0' \r\n"]
[16.378372, "o", "\u001b[36mINFO\u001b[0m[0007] Starting agents... \r\n"]
[16.388922, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-0' \r\n"]
[16.389848, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-1' \r\n"]
[16.397254, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-2' \r\n"]
[31.590126, "o", "\u001b[36mINFO\u001b[0m[0022] Starting helpers... \r\n"]
[31.637947, "o", "\u001b[36mINFO\u001b[0m[0022] Starting Node 'k3d-demo-serverlb' \r\n"]
[38.185432, "o", "\u001b[36mINFO\u001b[0m[0029] Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access \r\n"]
[50.256861, "o", "\u001b[36mINFO\u001b[0m[0041] Cluster 'demo' created successfully! \r\n\u001b[36mINFO\u001b[0m[0041] --kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false \r\n"]
[50.295453, "o", "\u001b[36mINFO\u001b[0m[0041] You can now use it like this: \r\nkubectl config use-context k3d-demo\r\nkubectl cluster-info\r\n"]
[50.299281, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[52.777117, "o", "k"]
[52.873341, "o", "3"]
[53.006105, "o", "d"]
[53.147707, "o", " "]
[53.245736, "o", "c"]
[53.343772, "o", "l"]
[53.551038, "o", "u"]
[53.617941, "o", "s"]
[53.724853, "o", "t"]
[53.878933, "o", "e"]
[53.956281, "o", "r"]
[54.076303, "o", " "]
[54.21845, "o", "l"]
[54.339561, "o", "s"]
[54.447647, "o", "\r\n\u001b[?2004l\r"]
[54.47118, "o", "NAME SERVERS AGENTS LOADBALANCER\r\ndemo 1/1 3/3 true\r\n"]
[54.472506, "o", "\u001b[?2004h"]
[54.472562, "o", "\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[54.838629, "o", "k"]
[54.918551, "o", "3"]
[55.015846, "o", "d"]
[55.115834, "o", " "]
[55.290514, "o", "n"]
[55.378089, "o", "o"]
[55.454292, "o", "d"]
[55.508669, "o", "e"]
[55.869687, "o", " "]
[56.05605, "o", "l"]
[56.176004, "o", "s"]
[56.31685, "o", "\r\n\u001b[?2004l\r"]
[56.341161, "o", "NAME ROLE CLUSTER STATUS\r\nk3d-demo-agent-0 agent demo running\r\nk3d-demo-agent-1 agent demo running\r\nk3d-demo-agent-2 agent demo running\r\nk3d-demo-server-0 server demo running\r\nk3d-demo-serverlb loadbalancer demo running\r\n"]
[56.34231, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[57.733293, "o", "k"]
[57.932149, "o", "u"]
[58.059135, "o", "b"]
[58.137901, "o", "e"]
[58.23908, "o", "c"]
[58.418996, "o", "t"]
[58.496899, "o", "l"]
[58.687091, "o", " "]
[58.740349, "o", "g"]
[58.832322, "o", "e"]
[58.955499, "o", "t"]
[59.067944, "o", " "]
[59.246223, "o", "n"]
[59.344781, "o", "o"]
[59.426918, "o", "d"]
[59.493282, "o", "e"]
[59.672248, "o", "s"]
[59.772331, "o", "\r\n\u001b[?2004l\r"]
[60.41166, "o", "NAME STATUS ROLES AGE VERSION\r\nk3d-demo-agent-2 Ready <none> 29s v1.21.4+k3s1\r\nk3d-demo-server-0 Ready control-plane,master 41s v1.21.4+k3s1\r\nk3d-demo-agent-0 Ready <none> 31s v1.21.4+k3s1\r\nk3d-demo-agent-1 Ready <none> 31s v1.21.4+k3s1\r\n"]
[60.414302, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[61.301105, "o", "k"]
[61.534792, "o", "u"]
[61.723192, "o", "b"]
[61.800647, "o", "e"]
[61.912191, "o", "c"]
[62.111433, "o", "t"]
[62.220654, "o", "l"]
[62.400417, "o", " "]
[62.434071, "o", "g"]
[62.523052, "o", "e"]
[62.634216, "o", "t"]
[62.700412, "o", " "]
[62.923073, "o", "p"]
[63.120958, "o", "o"]
[63.231192, "o", "d"]
[63.287011, "o", "s"]
[63.497854, "o", " "]
[63.642017, "o", "-"]
[63.896056, "o", "A"]
[64.129633, "o", "\r\n\u001b[?2004l\r"]
[64.180813, "o", "NAMESPACE NAME READY STATUS RESTARTS AGE\r\nkube-system coredns-7448499f4d-rrmh5 1/1 Running 0 34s\r\nkube-system metrics-server-86cbb8457f-6hkns 1/1 Running 0 34s\r\nkube-system local-path-provisioner-5ff76fc89d-ltzd4 1/1 Running 0 34s\r\nkube-system helm-install-traefik-crd-st9fm 0/1 Completed 0 34s\r\nkube-system traefik-97b44b794-lgljm 0/1 ContainerCreating 0 11s\r\nkube-system helm-install-traefik-6t7fr 0/1 Completed 1 "]
[64.181, "o", "34s\r\nkube-system svclb-traefik-wztvf 2/2 Running 0 11s\r\nkube-system svclb-traefik-ksk54 2/2 Running 0 11s\r\nkube-system svclb-traefik-s286b 2/2 Running 0 11s\r\nkube-system svclb-traefik-ksbmz 2/2 Running 0 11s\r\n"]
[64.182931, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[66.050907, "o", "#"]
[66.160953, "o", " "]
[66.559434, "o", "P"]
[66.768444, "o", "r"]
[66.844975, "o", "o"]
[67.022583, "o", "f"]
[67.098851, "o", "i"]
[67.286285, "o", "t"]
[67.921864, "o", "."]
[69.59588, "o", "\r\n\u001b[?2004l\r"]
[69.596126, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
[70.123764, "o", "\u001b[?2004l\r\r\nexit\r\n"]

View File

@ -23,28 +23,10 @@
position: relative;
}
/* This is equal to light mode */
[data-md-color-primary=black] .md-tabs {
/* Set color of the tab bar */
background-color: #0DCEFF;
}
/* Dark Mode */
[data-md-color-scheme="slate"] .md-header {
/* keep black backgroud of title bar (header) */
background-color: black;
}
/* Tab Bar */
.md-tabs {
color: black;
}
.md-tabs__item {
font-weight: bolder;
}
.md-tabs__link--active {
text-decoration: underline;
}

View File

@ -1,9 +1,7 @@
title: Guides
title: Usage
nav:
- commands
- configfile.md
- kubeconfig.md
- multiserver.md
- registries.md
- exposing_services.md
- advanced
- commands
- guides

View File

@ -1,4 +0,0 @@
title: Advanced Guides
nav:
- calico.md
- cuda.md

View File

@ -16,7 +16,8 @@ k3d
-e, --env # add environment variables to the nodes (quoted string, format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
--gpus # [from docker CLI] add GPU devices to the node containers (string, e.g. 'all')
-i, --image # specify which k3s image should be used for the nodes (string, default: 'docker.io/rancher/k3s:v1.20.0-k3s2', tag changes per build)
--k3s-arg # add additional arguments to the k3s server/agent (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help & https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
--k3s-agent-arg # add additional arguments to the k3s agent (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
--k3s-server-arg # add additional arguments to the k3s server (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
--kubeconfig-switch-context # (implies --kubeconfig-update-default) automatically sets the current-context of your default kubeconfig to the new cluster's context (default: true)
--kubeconfig-update-default # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true') (default: true)
-l, --label # add (docker) labels to the node containers (format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)

View File

@ -35,16 +35,15 @@ k3d cluster create NAME [flags]
- Example: `k3d cluster create --agents 2 --k3s-node-label "my.label@agent:0,1" --k3s-node-label "other.label=somevalue@server:0"`
--kubeconfig-switch-context Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true)
--kubeconfig-update-default Directly update the default kubeconfig with the new cluster's context (default true)
--lb-config-override strings Use dotted YAML path syntax to override nginx loadbalancer settings
--network string Join an existing network
--no-hostip Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS
--no-image-volume Disable the creation of a volume for importing images
--no-lb Disable the creation of a LoadBalancer in front of the server nodes
--no-rollback Disable the automatic rollback actions, if anything goes wrong
-p, --port [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER] Map ports from the node containers (via the serverlb) to the host (Format: [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER])
- Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent:1`
--registry-config string Specify path to an extra registries.yaml file
--registry-create NAME[:HOST][:HOSTPORT] Create a k3d-managed registry and connect it to the cluster (Format: NAME[:HOST][:HOSTPORT])
- Example: `k3d cluster create --registry-create mycluster-registry:0.0.0.0:5432`
--registry-create Create a k3d-managed registry and connect it to the cluster
--registry-use stringArray Connect to one or more k3d-managed registries running locally
--runtime-label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to container runtime (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]
- Example: `k3d cluster create --agents 2 --runtime-label "my.label@agent:0,1" --runtime-label "other.label=somevalue@server:0"`

View File

@ -13,9 +13,8 @@ k3d cluster delete [NAME [NAME ...] | --all] [flags]
### Options
```
-a, --all Delete all existing clusters
-c, --config string Path of a config file to use
-h, --help help for delete
-a, --all Delete all existing clusters
-h, --help help for delete
```
### Options inherited from parent commands

View File

@ -13,17 +13,15 @@ k3d node create NAME [flags]
### Options
```
-c, --cluster string Cluster URL or k3d cluster name to connect to. (default "k3s-default")
-c, --cluster string Select the cluster that the node shall connect to. (default "k3s-default")
-h, --help help for create
-i, --image string Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.21.4-k3s2")
-i, --image string Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.20.0-k3s2")
--k3s-node-label strings Specify k3s node labels in format "foo=bar"
--memory string Memory limit imposed on the node [From docker]
-n, --network strings Add node to (another) runtime network
--replicas int Number of replicas of this node specification. (default 1)
--role string Specify node role [server, agent] (default "agent")
--runtime-label strings Specify container runtime labels in format "foo=bar"
--timeout duration Maximum waiting time for '--wait' before canceling/returning.
-t, --token string Override cluster token (required when connecting to an external cluster)
--wait Wait for the node(s) to be ready before returning. (default true)
```

View File

@ -1,9 +1,10 @@
# Using Config Files
The config file feature is **available as of k3d v4.0.0**
# Config File
## Introduction
As of k3d v4.0.0, released in January 2021, k3d ships with configuration file support for the `k3d cluster create` command.
This allows you to define all the things that you defined with CLI flags before in a nice and tidy YAML (as a Kubernetes user, we know you love it ;) ).
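A minimal sketch of how such a config file is then passed to `k3d cluster create` (the path is a placeholder):
```bash
k3d cluster create --config /path/to/my-cluster-config.yaml
```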
!!! info "Syntax & Semantics"
The options defined in the config file are not 100% the same as the CLI flags.
This concerns naming and style/usage/structure, e.g.
@ -61,7 +62,6 @@ kubeAPI: # same as `--api-port myhost.my.domain:6445` (where the name would reso
hostPort: "6445" # where the Kubernetes API listening port will be mapped to on your host system
image: rancher/k3s:v1.20.4-k3s1 # same as `--image rancher/k3s:v1.20.4-k3s1`
network: my-custom-net # same as `--network my-custom-net`
subnet: "172.28.0.0/16" # same as `--subnet 172.28.0.0/16`
token: superSecretToken # same as `--token superSecretToken`
volumes: # repeatable flags are represented as YAML lists
- volume: /my/host/path:/path/in/node # same as `--volume '/my/host/path:/path/in/node@server:0;agent:*'`
@ -77,10 +77,7 @@ env:
nodeFilters:
- server:0
registries: # define how registries should be created or used
create: # creates a default registry to be used with the cluster; same as `--registry-create registry.localhost`
name: registry.localhost
host: "0.0.0.0"
hostPort: "5000"
create: true # creates a default registry to be used with the cluster; same as `--registry-create`
use:
- k3d-myotherregistry:5000 # some other k3d-managed registry; same as `--registry-use 'k3d-myotherregistry:5000'`
config: | # define contents of the `registries.yaml` file (or reference a file); same as `--registry-config /path/to/config.yaml`
@ -95,9 +92,7 @@ options:
disableLoadbalancer: false # same as `--no-lb`
disableImageVolume: false # same as `--no-image-volume`
disableRollback: false # same as `--no-Rollback`
loadbalancer:
configOverrides:
- settings.workerConnections=2048
disableHostIPInjection: false # same as `--no-hostip`
k3s: # options passed on to K3s itself
extraArgs: # additional arguments passed to the `k3s server|agent` command; same as `--k3s-arg`
- arg: --tls-san=my.host.domain
@ -134,4 +129,4 @@ For example, you use the same config file to create three clusters which only ha
## References
- k3d demo repository: <https://github.com/iwilltry42/k3d-demo/blob/main/README.md#config-file-support>
- SUSE Blog: <https://www.suse.com/c/introduction-k3d-run-k3s-docker-src/> (Search for `The “Configuration as Code” Way`)
- SUSE Blog: <https://www.suse.com/c/introduction-k3d-run-k3s-docker-src/> (Search fo `The “Configuration as Code” Way`)

6
docs/usage/guides/.pages Normal file
View File

@ -0,0 +1,6 @@
title: Guides
nav:
- exposing_services.md
- registries.md
- calico.md
- cuda.md

View File

@ -20,14 +20,14 @@ Or you can directly use this [calico.yaml](calico.yaml) manifest
On the k3s cluster creation:
- add the flag `--flannel-backend=none`. For this, on k3d you need to forward this flag to k3s with the option `--k3s-arg`.
- add the flag `--flannel-backend=none`. For this, on k3d you need to forward this flag to k3s with the option `--k3s-server-arg`.
- mount (`--volume`) the calico descriptor in the auto deploy manifest directory of k3s `/var/lib/rancher/k3s/server/manifests/`
So the cluster creation command is (when you are at the root of the k3d repository):
```bash
k3d cluster create "${clustername}" \
--k3s-arg '--flannel-backend=none@server:*' \
--k3s-server-arg '--flannel-backend=none' \
--volume "$(pwd)/docs/usage/guides/calico.yaml:/var/lib/rancher/k3s/server/manifests/calico.yaml"
```
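
As a sanity check, you can verify that Calico comes up before scheduling workloads (a sketch; the DaemonSet name and label are the ones used by the stock Calico manifest):

```bash
# wait for the Calico pods to become ready
kubectl -n kube-system rollout status daemonset/calico-node
kubectl -n kube-system get pods -l k8s-app=calico-node
```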

View File

@ -1,10 +1,13 @@
# Using Image Registries
# Registries
## Registries configuration file
You can add registries by specifying them in a `registries.yaml` and referencing it at creation time:
`#!bash k3d cluster create mycluster --registry-config "/home/YOU/my-registries.yaml"`.
??? Tip "Pre v4.0.0 solution"
Before we added the `--registry-config` flag in k3d v4.0.0, you had to bind-mount the file to the correct location: `--volume "/home/YOU/my-registries.yaml:/etc/rancher/k3s/registries.yaml"`
This file is a regular [k3s registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/), and looks like this:
```yaml
@ -14,7 +17,10 @@ mirrors:
- http://my.company.registry:5000
```
In this example, an image with a name like `my.company.registry:5000/nginx:latest` would be _pulled_ from the registry running at `http://my.company.registry:5000`.
In this example, an image with a name like `my.company.registry:5000/nginx:latest` would be
_pulled_ from the registry running at `http://my.company.registry:5000`.
Note that there is an important limitation: **this configuration file will only work with k3s >= v0.10.0**. It will fail silently with previous versions of k3s, but you will find an alternative solution in the [section below](#k3s-old).
This file can also be used for providing additional information necessary for accessing some registries, like [authentication](#authenticated-registries) and [certificates](#secure-registries).
@ -29,8 +35,7 @@ name: test
servers: 1
agents: 2
registries:
create:
name: myregistry
create: true
config: |
mirrors:
"my.company.registry":
@ -38,7 +43,7 @@ registries:
- http://my.company.registry:5000
```
Here, the config for the k3d-managed registry, created by the `create: {...}` option will be merged with the config specified under `config: |`.
Here, the config for the k3d-managed registry, created by the `create: true` flag will be merged with the config specified under `config: |`.
### Authenticated registries
@ -90,21 +95,24 @@ k3d cluster create \
### Using k3d-managed registries
!!! info "Just ported!"
The k3d-managed registry is available again as of k3d v4.0.0 (January 2021)
#### Create a dedicated registry together with your cluster
1. `#!bash k3d cluster create mycluster --registry-create mycluster-registry`: This creates your cluster `mycluster` together with a registry container called `mycluster-registry`
1. `#!bash k3d cluster create mycluster --registry-create`: This creates your cluster `mycluster` together with a registry container called `k3d-mycluster-registry`
- k3d sets everything up in the cluster for containerd to be able to pull images from that registry (using the `registries.yaml` file)
- the port the registry is listening on will be mapped to a random port on your host system
2. Check the k3d command output or `#!bash docker ps -f name=mycluster-registry` to find the exposed port (let's use `12345` here)
3. Pull some image (optional) `#!bash docker pull alpine:latest`, re-tag it to reference your newly created registry `#!bash docker tag alpine:latest mycluster-registry:12345/testimage:local` and push it `#!bash docker push mycluster-registry:12345/testimage:local`
4. Use kubectl to create a new pod in your cluster using that image to see if the cluster can pull from the new registry: `#!bash kubectl run --image mycluster-registry:12345/testimage:local testimage --command -- tail -f /dev/null` (creates a container that will not do anything but keep on running)
2. Check the k3d command output or `#!bash docker ps -f name=k3d-mycluster-registry` to find the exposed port (let's use `12345` here)
3. Pull some image (optional) `#!bash docker pull alpine:latest`, re-tag it to reference your newly created registry `#!bash docker tag alpine:latest k3d-mycluster-registry:12345/testimage:local` and push it `#!bash docker push k3d-mycluster-registry:12345/testimage:local`
4. Use kubectl to create a new pod in your cluster using that image to see if the cluster can pull from the new registry: `#!bash kubectl run --image k3d-mycluster-registry:12345/testimage:local testimage --command -- tail -f /dev/null` (creates a container that will not do anything but keep on running)
#### Create a customized k3d-managed registry
1. `#!bash k3d registry create myregistry.localhost --port 12345` creates a new registry called `k3d-myregistry.localhost` (could be used with automatic resolution of `*.localhost`, see next section - also, **note the `k3d-` prefix** that k3d adds to all resources it creates)
2. `#!bash k3d cluster create newcluster --registry-use k3d-myregistry.localhost:12345` (make sure you use the **`k3d-` prefix** here) creates a new cluster set up to use that registry
2. `#!bash k3d cluster create newcluster --registry-use k3d-myregistry.localhost:12345` (make sure you use the **`k3d-` prefix** here) creates a new cluster set up to us that registry
3. Continue with steps 3 and 4 from the section above for testing (see the condensed sketch below)
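
Condensed into one go (a sketch: the port `12345` and the image names are placeholders, and pushing assumes `k3d-myregistry.localhost` resolves on your host as described further below):

```bash
# create a registry and a cluster that is configured to use it
k3d registry create myregistry.localhost --port 12345
k3d cluster create newcluster --registry-use k3d-myregistry.localhost:12345

# push a test image and run it inside the cluster
docker pull alpine:latest
docker tag alpine:latest k3d-myregistry.localhost:12345/testimage:local
docker push k3d-myregistry.localhost:12345/testimage:local
kubectl run testimage --image k3d-myregistry.localhost:12345/testimage:local --command -- tail -f /dev/null
```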
<!-- Admonition to describe usage of a non-k3d-managed registry -->
@ -121,13 +129,13 @@ k3d cluster create \
docker container run -d --name registry.localhost -v local_registry:/var/lib/registry --restart always -p 5000:5000 registry:2
```
These commands will start your registry container with name and port `registry.localhost:5000`. In order to push to this registry, you will need to make it accessible as described in the next section.
These commands will start your registry in `registry.localhost:5000`. In order to push to this registry, you will need to make it accessible as described in the next section.
Once your registry is up and running, we will need to add it to your `registries.yaml` configuration file.
Finally, you have to connect the registry network to the k3d cluster network: `#!bash docker network connect k3d-k3s-default registry.localhost`. And then you can [test your local registry](#testing-your-registry).
### Pushing to your local registry address
As per the guide above, the registry will be available as `registry.localhost:5000`.
As per the guide above, the registry will be available at `registry.localhost:5000`.
All the nodes in your k3d cluster can resolve this hostname (thanks to the DNS server provided by the Docker daemon) but, in order to be able to push to this registry, this hostname also has to be resolved by your host.
@ -142,9 +150,7 @@ If your system does not provide/support tools that can auto-resolve specific nam
127.0.0.1 k3d-registry.localhost
```
!!! info "Just use localhost"
Alternatively, if you don't care about pretty names, just push directly to `localhost:5000` (or whatever port you used) and it will work.
If you later pull the image from the registry, only the repository path (e.g. `myrepo/myimage:mytag` in `registry.localhost:5000/myrepo/myimage:mytag`) matters to find your image in the targeted registry.
Once again, this will only work with k3s >= v0.10.0 (see the section below when using k3s <= v0.9.1)
## Testing your registry
@ -193,3 +199,44 @@ EOF
```
Then you should check that the pod is running with `kubectl get pods -l "app=nginx-test-registry"`.
## Configuring registries for k3s <= v0.9.1
k3s servers below v0.9.1 do not recognize the `registries.yaml` file as described in the beginning, so you will need to embed the contents of that file in a `containerd` configuration file.
You will have to create your own `containerd` configuration file at some well-known path like `${HOME}/.k3d/config.toml.tmpl`, like this:
??? registriesprev091 "config.toml.tmpl"
```toml
# Original section: no changes
[plugins.opt]
path = "{{ .NodeConfig.Containerd.Opt }}"
[plugins.cri]
stream_server_address = "{{ .NodeConfig.AgentConfig.NodeName }}"
stream_server_port = "10010"
{{- if .IsRunningInUserNS }}
disable_cgroup = true
disable_apparmor = true
restrict_oom_score_adj = true
{{ end -}}
{{- if .NodeConfig.AgentConfig.PauseImage }}
sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"
{{ end -}}
{{- if not .NodeConfig.NoFlannel }}
[plugins.cri.cni]
bin_dir = "{{ .NodeConfig.AgentConfig.CNIBinDir }}"
conf_dir = "{{ .NodeConfig.AgentConfig.CNIConfDir }}"
{{ end -}}
# Added section: additional registries and the endpoints
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."registry.localhost:5000"]
endpoint = ["http://registry.localhost:5000"]
```
and then mount it at `/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl` (where `containerd` in your k3d nodes will load it) when creating the k3d cluster:
```bash
k3d cluster create mycluster \
--volume ${HOME}/.k3d/config.toml.tmpl:/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
```
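
To check that the template was picked up, you can inspect the rendered configuration inside a node (a sketch; the container name assumes the default naming scheme of a cluster called `mycluster`):

```bash
# k3s renders config.toml.tmpl to config.toml in the same directory
docker exec k3d-mycluster-server-0 cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml
```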

View File

@ -1,12 +1,12 @@
# Creating multi-server clusters
!!! info "Important note"
For the best results (and fewer unexpected issues), choose 1, 3, 5, ... server nodes. (Read more on etcd quorum on [etcd.io](https://etcd.io/docs/v3.3/faq/#why-an-odd-number-of-cluster-members))
For the best results (and fewer unexpected issues), choose 1, 3, 5, ... server nodes.
At least 2 cores and 4GiB of RAM are recommended.
## Embedded etcd
## Embedded etcd (old: dqlite)
Create a cluster with 3 server nodes using k3s' embedded etcd database.
Create a cluster with 3 server nodes using k3s' embedded etcd (old: dqlite) database.
The first server to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other server nodes.
```bash
@ -23,4 +23,4 @@ k3d node create newserver --cluster multiserver --role server
!!! important "There's a trap!"
If your cluster was initially created with only a single server node, then this will fail.
That's because the initial server node was not started with the `--cluster-init` flag and thus is not using the etcd backend.
That's because the initial server node was not started with the `--cluster-init` flag and thus is not using the etcd (old: dqlite) backend.

85
go.mod
View File

@ -1,15 +1,16 @@
module github.com/rancher/k3d/v5
module github.com/rancher/k3d/v4
go 1.17
go 1.16
require (
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/Microsoft/hcsshim v0.8.18 // indirect
github.com/containerd/cgroups v1.0.1 // indirect
github.com/containerd/containerd v1.5.5
github.com/docker/cli v20.10.8+incompatible
github.com/docker/docker v20.10.8+incompatible
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect
github.com/Microsoft/hcsshim v0.8.14 // indirect
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect
github.com/containerd/containerd v1.4.4 // indirect
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e // indirect
github.com/docker/cli v20.10.7+incompatible
github.com/docker/docker v20.10.7+incompatible
github.com/docker/docker-credential-helpers v0.6.3 // indirect
github.com/docker/go-connections v0.4.0
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.4.0
@ -36,68 +37,8 @@ require (
golang.org/x/text v0.3.6 // indirect
gopkg.in/yaml.v2 v2.4.0
gotest.tools v2.2.0+incompatible
inet.af/netaddr v0.0.0-20210903134321-85fa6c94624e
k8s.io/client-go v0.22.1
gotest.tools/v3 v3.0.3 // indirect
inet.af/netaddr v0.0.0-20210421205553-78c777480f22
k8s.io/client-go v0.21.0
sigs.k8s.io/yaml v1.2.0
)
require github.com/spf13/pflag v1.0.5
require (
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/gorilla/mux v1.7.3 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/magiconair/properties v1.8.5 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/pkcs11 v1.0.3 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/sys/mountinfo v0.4.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pelletier/go-toml v1.9.3 // indirect
github.com/prometheus/client_golang v1.7.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.10.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
go.opencensus.io v0.23.0 // indirect
go4.org/intern v0.0.0-20210108033219-3eb7198706b2 // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 // indirect
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 // indirect
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 // indirect
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 // indirect
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
google.golang.org/grpc v1.38.0 // indirect
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
k8s.io/apimachinery v0.22.1 // indirect
k8s.io/klog/v2 v2.9.0 // indirect
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
)

456
go.sum

File diff suppressed because it is too large

View File

@ -21,7 +21,7 @@ THE SOFTWARE.
*/
package main
import "github.com/rancher/k3d/v5/cmd"
import "github.com/rancher/k3d/v4/cmd"
func main() {
cmd.Execute()

View File

@ -1,4 +1,4 @@
image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
image: rancher/k3d:{{#if build.tag}}{{build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
@ -6,21 +6,21 @@ tags:
{{/each}}
{{/if}}
manifests:
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}linux-amd64
platform:
architecture: amd64
os: linux
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}linux-arm64
platform:
variant: v8
architecture: arm64
os: linux
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}linux-arm
platform:
variant: v7
architecture: arm
os: linux
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm
- image: rancher/k3d:{{#if build.tag}}{{build.tag}}-{{/if}}linux-arm
platform:
variant: v6
architecture: arm

View File

@ -25,11 +25,7 @@ theme:
name: material
language: en
features:
- navigation.top # show back to top button
- search.suggest # search suggestions: https://squidfunk.github.io/mkdocs-material/setup/setting-up-site-search/#search-suggestions
- search.highlight # highlight search term on target page: https://squidfunk.github.io/mkdocs-material/setup/setting-up-site-search/#search-suggestions
- navigation.expand
- navigation.tabs
- tabs
palette:
- media: "(prefers-color-scheme: light)"
scheme: default
@ -75,16 +71,6 @@ plugins:
type: date
- awesome-pages # https://squidfunk.github.io/mkdocs-material/plugins/awesome-pages/
- include-markdown # https://github.com/mondeja/mkdocs-include-markdown-plugin
- mike: # Versioned Docs: https://github.com/jimporter/mike
version_selector: true # set to false to leave out the version selector
css_dir: static/css # the directory to put the version selector's CSS
javascript_dir: static/js # the directory to put the version selector's JS
canonical_version: null # the version for <link rel="canonical">; `null` uses the version specified via `mike deploy`
# Extra mkdocs-material settings
extra:
version:
provider: mike
# Other Settings
strict: true # halt processing when a warning is raised
strict: true # halt processing when a warning is raised

View File

@ -22,16 +22,11 @@ THE SOFTWARE.
package actions
import (
"bytes"
"context"
"fmt"
"io"
"os"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
type WriteFileAction struct {
@ -44,35 +39,3 @@ type WriteFileAction struct {
func (act WriteFileAction) Run(ctx context.Context, node *k3d.Node) error {
return act.Runtime.WriteToNode(ctx, act.Content, act.Dest, act.Mode, node)
}
type RewriteFileAction struct {
Runtime runtimes.Runtime
Path string
RewriteFunc func([]byte) ([]byte, error)
Mode os.FileMode
}
func (act RewriteFileAction) Run(ctx context.Context, node *k3d.Node) error {
reader, err := act.Runtime.ReadFromNode(ctx, act.Path, node)
if err != nil {
return fmt.Errorf("runtime failed to read '%s' from node '%s': %w", act.Path, node.Name, err)
}
defer reader.Close()
file, err := io.ReadAll(reader)
if err != nil {
return fmt.Errorf("failed to read file: %w", err)
}
file = bytes.Trim(file[512:], "\x00") // trim control characters, etc.
file, err = act.RewriteFunc(file)
if err != nil {
return fmt.Errorf("error while rewriting %s in %s: %w", act.Path, node.Name, err)
}
l.Log().Tracef("Rewritten:\n%s", string(file))
return act.Runtime.WriteToNode(ctx, file, act.Path, act.Mode, node)
}

View File

@ -22,16 +22,13 @@ THE SOFTWARE.
package client
import (
"bytes"
"context"
_ "embed"
"errors"
"fmt"
"io"
"os"
"io/ioutil"
"sort"
"strconv"
"strings"
"time"
gort "runtime"
@ -39,17 +36,16 @@ import (
"github.com/docker/go-connections/nat"
"github.com/imdario/mergo"
copystruct "github.com/mitchellh/copystructure"
"github.com/rancher/k3d/v5/pkg/actions"
config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
k3drt "github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/runtimes/docker"
runtimeErr "github.com/rancher/k3d/v5/pkg/runtimes/errors"
"github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/pkg/util"
"github.com/sirupsen/logrus"
"github.com/rancher/k3d/v4/pkg/actions"
config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
"github.com/rancher/k3d/v4/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/k3s"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"gopkg.in/yaml.v2"
)
@ -63,9 +59,6 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
return fmt.Errorf("Failed Cluster Preparation: %+v", err)
}
// Create tools-node for later steps
go EnsureToolsNode(ctx, runtime, &clusterConfig.Cluster)
/*
* Step 1: Create Containers
*/
@ -76,20 +69,15 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
/*
* Step 2: Pre-Start Configuration
*/
envInfo, err := GatherEnvironmentInfo(ctx, runtime, &clusterConfig.Cluster)
if err != nil {
return fmt.Errorf("failed to gather environment information used for cluster creation: %w", err)
}
// TODO: ClusterRun: add cluster configuration step here
/*
* Step 3: Start Containers
*/
if err := ClusterStart(ctx, runtime, &clusterConfig.Cluster, k3d.ClusterStartOpts{
WaitForServer: clusterConfig.ClusterCreateOpts.WaitForServer,
Timeout: clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
NodeHooks: clusterConfig.ClusterCreateOpts.NodeHooks,
EnvironmentInfo: envInfo,
Intent: k3d.IntentClusterCreate,
WaitForServer: clusterConfig.ClusterCreateOpts.WaitForServer,
Timeout: clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
NodeHooks: clusterConfig.ClusterCreateOpts.NodeHooks,
}); err != nil {
return fmt.Errorf("Failed Cluster Start: %+v", err)
}
@ -101,10 +89,19 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
* Additional Cluster Preparation *
**********************************/
/*
* Networking Magic
*/
// add /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
if !clusterConfig.ClusterCreateOpts.PrepDisableHostIPInjection {
prepInjectHostIP(ctx, runtime, &clusterConfig.Cluster)
}
// create the registry hosting configmap
if len(clusterConfig.ClusterCreateOpts.Registries.Use) > 0 {
if err := prepCreateLocalRegistryHostingConfigMap(ctx, runtime, &clusterConfig.Cluster); err != nil {
l.Log().Warnf("Failed to create LocalRegistryHosting ConfigMap: %+v", err)
log.Warnf("Failed to create LocalRegistryHosting ConfigMap: %+v", err)
}
}
@ -151,14 +148,14 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
// Ensure referenced registries
for _, reg := range clusterConfig.ClusterCreateOpts.Registries.Use {
l.Log().Debugf("Trying to find registry %s", reg.Host)
log.Debugf("Trying to find registry %s", reg.Host)
regNode, err := runtime.GetNode(ctx, &k3d.Node{Name: reg.Host})
if err != nil {
return fmt.Errorf("Failed to find registry node '%s': %+v", reg.Host, err)
}
regFromNode, err := RegistryFromNode(regNode)
if err != nil {
return fmt.Errorf("failed to translate node to registry spec: %w", err)
return err
}
*reg = *regFromNode
}
@ -176,7 +173,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
}
// Use existing registries (including the new one, if created)
l.Log().Tracef("Using Registries: %+v", clusterConfig.ClusterCreateOpts.Registries.Use)
log.Tracef("Using Registries: %+v", clusterConfig.ClusterCreateOpts.Registries.Use)
var registryConfig *k3s.Registry
@ -203,7 +200,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
if err != nil {
return fmt.Errorf("Failed to generate LocalRegistryHosting configmap: %+v", err)
}
l.Log().Tracef("Writing LocalRegistryHosting YAML:\n%s", string(regCm))
log.Tracef("Writing LocalRegistryHosting YAML:\n%s", string(regCm))
clusterConfig.ClusterCreateOpts.NodeHooks = append(clusterConfig.ClusterCreateOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
@ -223,7 +220,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
if err := RegistryMergeConfig(ctx, registryConfig, clusterConfig.ClusterCreateOpts.Registries.Config); err != nil {
return err
}
l.Log().Tracef("Merged registry config: %+v", registryConfig)
log.Tracef("Merged registry config: %+v", registryConfig)
} else {
registryConfig = clusterConfig.ClusterCreateOpts.Registries.Config
}
@ -250,7 +247,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
// ClusterPrepNetwork creates a new cluster network, if needed or sets everything up to re-use an existing network
func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
l.Log().Infoln("Prep: Network")
log.Infoln("Prep: Network")
// error out if external cluster network should be used but no name was set
if cluster.Network.Name == "" && cluster.Network.External {
@ -276,7 +273,8 @@ func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d
// create cluster network or use an existing one
network, networkExists, err := runtime.CreateNetworkIfNotPresent(ctx, &cluster.Network)
if err != nil {
return fmt.Errorf("failed to create cluster network: %w", err)
log.Errorln("Failed to create cluster network")
return err
}
cluster.Network = *network
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkID] = network.ID
@ -284,7 +282,7 @@ func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkIPRange] = cluster.Network.IPAM.IPPrefix.String()
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = strconv.FormatBool(cluster.Network.External)
if networkExists {
l.Log().Infof("Re-using existing network '%s' (%s)", network.Name, network.ID)
log.Infof("Re-using existing network '%s' (%s)", network.Name, network.ID)
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
}
@ -298,11 +296,11 @@ func ClusterPrepImageVolume(ctx context.Context, runtime k3drt.Runtime, cluster
*/
imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
if err := runtime.CreateVolume(ctx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
return fmt.Errorf("failed to create image volume '%s' for cluster '%s': %w", imageVolumeName, cluster.Name, err)
log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
return err
}
clusterCreateOpts.GlobalLabels[k3d.LabelImageVolume] = imageVolumeName
cluster.ImageVolume = imageVolumeName
// attach volume to nodes
for _, node := range cluster.Nodes {
@ -316,7 +314,7 @@ func ClusterPrepImageVolume(ctx context.Context, runtime k3drt.Runtime, cluster
// - a docker network
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
l.Log().Tracef(`
log.Tracef(`
===== Creating Cluster =====
Runtime:
@ -347,16 +345,16 @@ ClusterCreatOpts:
*/
if cluster.KubeAPI.Host == k3d.DefaultAPIHost && runtime == k3drt.Docker {
if gort.GOOS == "windows" || gort.GOOS == "darwin" {
l.Log().Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
if err != nil {
l.Log().Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
log.Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
} else if machineIP != "" {
l.Log().Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
log.Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
cluster.KubeAPI.Host = machineIP
cluster.KubeAPI.Binding.HostIP = machineIP
} else {
l.Log().Traceln("Not using docker-machine")
log.Traceln("Not using docker-machine")
}
}
}
@ -380,7 +378,7 @@ ClusterCreatOpts:
// connection url is always the name of the first server node (index 0) // TODO: change this to the server loadbalancer
connectionURL := fmt.Sprintf("https://%s:%s", GenerateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL
clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("%s=%s", k3s.EnvClusterToken, cluster.Token))
clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token))
nodeSetup := func(node *k3d.Node) error {
// cluster specific settings
@ -402,7 +400,7 @@ ClusterCreatOpts:
if cluster.Network.IPAM.Managed {
ip, err := GetIP(ctx, runtime, &cluster.Network)
if err != nil {
return fmt.Errorf("failed to find free IP in network %s: %w", cluster.Network.Name, err)
return err
}
cluster.Network.IPAM.IPsUsed = append(cluster.Network.IPAM.IPsUsed, ip) // make sure that we're not reusing the same IP next time
node.IP.Static = true
@ -414,12 +412,12 @@ ClusterCreatOpts:
// the cluster has an init server node, but its not this one, so connect it to the init node
if cluster.InitNode != nil && !node.ServerOpts.IsInit {
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, connectionURL))
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
node.RuntimeLabels[k3d.LabelServerIsInit] = "false" // set label, that this server node is not the init server
}
} else if node.Role == k3d.AgentRole {
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, connectionURL))
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
}
node.Networks = []string{cluster.Network.Name}
@ -427,11 +425,12 @@ ClusterCreatOpts:
node.GPURequest = clusterCreateOpts.GPURequest
// create node
l.Log().Infof("Creating node '%s'", node.Name)
log.Infof("Creating node '%s'", node.Name)
if err := NodeCreate(clusterCreateCtx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
return fmt.Errorf("failed to create node: %w", err)
log.Errorln("Failed to create node")
return err
}
l.Log().Debugf("Created node '%s'", node.Name)
log.Debugf("Created node '%s'", node.Name)
// start node
//return NodeStart(clusterCreateCtx, runtime, node, k3d.NodeStartOpts{PreStartActions: clusterCreateOpts.NodeHookActions})
@ -443,7 +442,7 @@ ClusterCreatOpts:
// create init node first
if cluster.InitNode != nil {
l.Log().Infoln("Creating initializing server node")
log.Infoln("Creating initializing server node")
cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init")
if cluster.InitNode.RuntimeLabels == nil {
cluster.InitNode.RuntimeLabels = map[string]string{}
@ -459,7 +458,7 @@ ClusterCreatOpts:
}
if err := nodeSetup(cluster.InitNode); err != nil {
return fmt.Errorf("failed init node setup: %w", err)
return err
}
serverCount++
@ -487,14 +486,14 @@ ClusterCreatOpts:
}
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
if err := nodeSetup(node); err != nil {
return fmt.Errorf("failed setup of server/agent node %s: %w", node.Name, err)
return err
}
}
}
// WARN, if there are exactly two server nodes: that means we're using etcd, but don't have fault tolerance
if serverCount == 2 {
l.Log().Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
}
/*
@ -503,10 +502,9 @@ ClusterCreatOpts:
// *** ServerLoadBalancer ***
if !clusterCreateOpts.DisableLoadBalancer {
if cluster.ServerLoadBalancer == nil {
l.Log().Infof("No loadbalancer specified, creating a default one...")
lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels})
if err != nil {
return fmt.Errorf("failed to prepare loadbalancer: %w", err)
return err
}
cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
}
@ -524,7 +522,7 @@ ClusterCreatOpts:
// prepare to write config to lb container
configyaml, err := yaml.Marshal(cluster.ServerLoadBalancer.Config)
if err != nil {
return fmt.Errorf("failed to marshal loadbalancer config: %w", err)
return err
}
writeLbConfigAction := k3d.NodeHook{
@ -539,11 +537,12 @@ ClusterCreatOpts:
cluster.ServerLoadBalancer.Node.HookActions = append(cluster.ServerLoadBalancer.Node.HookActions, writeLbConfigAction)
l.Log().Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
log.Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
if err := NodeCreate(ctx, runtime, cluster.ServerLoadBalancer.Node, k3d.NodeCreateOpts{}); err != nil {
return fmt.Errorf("error creating loadbalancer: %v", err)
}
l.Log().Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
log.Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
return err
}
return nil
@ -552,18 +551,18 @@ ClusterCreatOpts:
// ClusterDelete deletes an existing cluster
func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, opts k3d.ClusterDeleteOpts) error {
l.Log().Infof("Deleting cluster '%s'", cluster.Name)
log.Infof("Deleting cluster '%s'", cluster.Name)
cluster, err := ClusterGet(ctx, runtime, cluster)
if err != nil {
return fmt.Errorf("failed to get cluster: %w", err)
return err
}
l.Log().Debugf("Cluster Details: %+v", cluster)
log.Debugf("Cluster Details: %+v", cluster)
failed := 0
for _, node := range cluster.Nodes {
// registry: only delete, if not connected to other networks
if node.Role == k3d.RegistryRole && !opts.SkipRegistryCheck {
l.Log().Tracef("Registry Node has %d networks: %+v", len(node.Networks), node)
log.Tracef("Registry Node has %d networks: %+v", len(node.Networks), node)
// check if node is connected to other networks, that are not
// - the cluster network
@ -577,21 +576,21 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
if net == "bridge" || net == "host" {
continue
}
l.Log().Tracef("net: %s", net)
log.Tracef("net: %s", net)
connectedToOtherNet = true
break
}
if connectedToOtherNet {
l.Log().Infof("Registry %s is also connected to other (non-default) networks (%+v), not deleting it...", node.Name, node.Networks)
log.Infof("Registry %s is also connected to other (non-default) networks (%+v), not deleting it...", node.Name, node.Networks)
if err := runtime.DisconnectNodeFromNetwork(ctx, node, cluster.Network.Name); err != nil {
l.Log().Warnf("Failed to disconnect registry %s from cluster network %s", node.Name, cluster.Network.Name)
log.Warnf("Failed to disconnect registry %s from cluster network %s", node.Name, cluster.Network.Name)
}
continue
}
}
if err := NodeDelete(ctx, runtime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
l.Log().Warningf("Failed to delete node '%s': Try to delete it manually", node.Name)
log.Warningf("Failed to delete node '%s': Try to delete it manually", node.Name)
failed++
continue
}
@ -600,48 +599,48 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// Delete the cluster network, if it was created for/by this cluster (and if it's not in use anymore)
if cluster.Network.Name != "" {
if !cluster.Network.External {
l.Log().Infof("Deleting cluster network '%s'", cluster.Network.Name)
log.Infof("Deleting cluster network '%s'", cluster.Network.Name)
if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
if errors.Is(err, runtimeErr.ErrRuntimeNetworkNotEmpty) { // there are still containers connected to that network
connectedNodes, err := runtime.GetNodesInNetwork(ctx, cluster.Network.Name) // check, if there are any k3d nodes connected to the cluster
if err != nil {
l.Log().Warningf("Failed to check cluster network for connected nodes: %+v", err)
log.Warningf("Failed to check cluster network for connected nodes: %+v", err)
}
if len(connectedNodes) > 0 { // there are still k3d-managed containers (aka nodes) connected to the network
connectedRegistryNodes := util.FilterNodesByRole(connectedNodes, k3d.RegistryRole)
if len(connectedRegistryNodes) == len(connectedNodes) { // only registry node(s) left in the network
for _, node := range connectedRegistryNodes {
l.Log().Debugf("Disconnecting registry node %s from the network...", node.Name)
log.Debugf("Disconnecting registry node %s from the network...", node.Name)
if err := runtime.DisconnectNodeFromNetwork(ctx, node, cluster.Network.Name); err != nil {
l.Log().Warnf("Failed to disconnect registry %s from network %s", node.Name, cluster.Network.Name)
log.Warnf("Failed to disconnect registry %s from network %s", node.Name, cluster.Network.Name)
} else {
if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
l.Log().Warningf("Failed to delete cluster network, even after disconnecting registry node(s): %+v", err)
log.Warningf("Failed to delete cluster network, even after disconnecting registry node(s): %+v", err)
}
}
}
} else { // besides the registry node(s), there are still other nodes... maybe they still need a registry
l.Log().Debugf("There are some non-registry nodes left in the network")
log.Debugf("There are some non-registry nodes left in the network")
}
} else {
l.Log().Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
log.Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
}
} else {
l.Log().Warningf("Failed to delete cluster network '%s': '%+v'", cluster.Network.Name, err)
log.Warningf("Failed to delete cluster network '%s': '%+v'", cluster.Network.Name, err)
}
}
} else if cluster.Network.External {
l.Log().Debugf("Skip deletion of cluster network '%s' because it's managed externally", cluster.Network.Name)
log.Debugf("Skip deletion of cluster network '%s' because it's managed externally", cluster.Network.Name)
}
}
// delete image volume
if cluster.ImageVolume != "" {
l.Log().Infof("Deleting image volume '%s'", cluster.ImageVolume)
log.Infof("Deleting image volume '%s'", cluster.ImageVolume)
if err := runtime.DeleteVolume(ctx, cluster.ImageVolume); err != nil {
l.Log().Warningf("Failed to delete image volume '%s' of cluster '%s': Try to delete it manually", cluster.ImageVolume, cluster.Name)
log.Warningf("Failed to delete image volume '%s' of cluster '%s': Try to delete it manually", cluster.ImageVolume, cluster.Name)
}
}
@ -654,25 +653,26 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// ClusterList returns a list of all existing clusters
func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
l.Log().Traceln("Listing Clusters...")
log.Traceln("Listing Clusters...")
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels)
if err != nil {
return nil, fmt.Errorf("runtime failed to list nodes: %w", err)
log.Errorln("Failed to get clusters")
return nil, err
}
l.Log().Debugf("Found %d nodes", len(nodes))
if l.Log().GetLevel() == logrus.TraceLevel {
log.Debugf("Found %d nodes", len(nodes))
if log.GetLevel() == log.TraceLevel {
for _, node := range nodes {
l.Log().Tracef("Found node %s of role %s", node.Name, node.Role)
log.Tracef("Found node %s of role %s", node.Name, node.Role)
}
}
nodes = NodeFilterByRoles(nodes, k3d.ClusterInternalNodeRoles, k3d.ClusterExternalNodeRoles)
l.Log().Tracef("Found %d cluster-internal nodes", len(nodes))
if l.Log().GetLevel() == logrus.TraceLevel {
log.Tracef("Found %d cluster-internal nodes", len(nodes))
if log.GetLevel() == log.TraceLevel {
for _, node := range nodes {
l.Log().Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName])
log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName])
}
}
@ -699,11 +699,11 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er
// enrich cluster structs with label values
for _, cluster := range clusters {
if err := populateClusterFieldsFromLabels(cluster); err != nil {
l.Log().Warnf("Failed to populate cluster fields from node label values for cluster '%s'", cluster.Name)
l.Log().Warnln(err)
log.Warnf("Failed to populate cluster fields from node label values for cluster '%s'", cluster.Name)
log.Warnln(err)
}
}
l.Log().Debugf("Found %d clusters", len(clusters))
log.Debugf("Found %d clusters", len(clusters))
return clusters, nil
}
@ -756,7 +756,7 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
// get nodes that belong to the selected cluster
nodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name})
if err != nil {
l.Log().Errorf("Failed to get nodes for cluster '%s': %v", cluster.Name, err)
log.Errorf("Failed to get nodes for cluster '%s': %v", cluster.Name, err)
}
if len(nodes) == 0 {
@ -797,14 +797,15 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
if cluster.ServerLoadBalancer != nil && cluster.ServerLoadBalancer.Node != nil {
lbcfg, err := GetLoadbalancerConfig(ctx, runtime, cluster)
if err != nil {
l.Log().Errorf("error getting loadbalancer config from %s: %v", cluster.ServerLoadBalancer.Node.Name, err)
log.Errorf("error getting loadbalancer config from %s: %v", cluster.ServerLoadBalancer.Node.Name, err)
}
cluster.ServerLoadBalancer.Config = &lbcfg
}
}
if err := populateClusterFieldsFromLabels(cluster); err != nil {
l.Log().Warnf("Failed to populate cluster fields from node labels: %v", err)
log.Warnf("Failed to populate cluster fields from node labels")
log.Warnln(err)
}
return cluster, nil
@ -820,16 +821,12 @@ func GenerateNodeName(cluster string, role k3d.Role, suffix int) string {
}
// ClusterStart starts a whole cluster (i.e. all nodes of the cluster)
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterStartOpts types.ClusterStartOpts) error {
l.Log().Infof("Starting cluster '%s'", cluster.Name)
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.ClusterStartOpts) error {
log.Infof("Starting cluster '%s'", cluster.Name)
if clusterStartOpts.Intent == "" {
clusterStartOpts.Intent = k3d.IntentClusterStart
}
if clusterStartOpts.Timeout > 0*time.Second {
if startClusterOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, clusterStartOpts.Timeout)
ctx, cancel = context.WithTimeout(ctx, startClusterOpts.Timeout)
defer cancel()
}
@ -852,21 +849,28 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
}
}
// sort list of servers for properly ordered sequential start
// TODO: remove trace logs below
log.Traceln("Servers before sort:")
for i, n := range servers {
log.Tracef("Server %d - %s", i, n.Name)
}
sort.Slice(servers, func(i, j int) bool {
return servers[i].Name < servers[j].Name
})
log.Traceln("Servers after sort:")
for i, n := range servers {
log.Tracef("Server %d - %s", i, n.Name)
}
/*
* Init Node
*/
if initNode != nil {
l.Log().Infoln("Starting the initializing server...")
if err := NodeStart(ctx, runtime, initNode, &k3d.NodeStartOpts{
log.Infoln("Starting the initializing server...")
if err := NodeStart(ctx, runtime, initNode, k3d.NodeStartOpts{
Wait: true, // always wait for the init node
NodeHooks: clusterStartOpts.NodeHooks,
ReadyLogMessage: types.GetReadyLogMessage(initNode, clusterStartOpts.Intent), // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
NodeHooks: startClusterOpts.NodeHooks,
ReadyLogMessage: "Running kube-apiserver", // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
}); err != nil {
return fmt.Errorf("Failed to start initializing server node: %+v", err)
}
@ -875,13 +879,13 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
/*
* Server Nodes
*/
l.Log().Infoln("Starting servers...")
log.Infoln("Starting servers...")
nodeStartOpts := k3d.NodeStartOpts{
Wait: true,
NodeHooks: startClusterOpts.NodeHooks,
}
for _, serverNode := range servers {
if err := NodeStart(ctx, runtime, serverNode, &k3d.NodeStartOpts{
Wait: true,
NodeHooks: append(clusterStartOpts.NodeHooks, serverNode.HookActions...),
EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
}); err != nil {
if err := NodeStart(ctx, runtime, serverNode, nodeStartOpts); err != nil {
return fmt.Errorf("Failed to start server %s: %+v", serverNode.Name, err)
}
}
@ -892,15 +896,11 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
agentWG, aCtx := errgroup.WithContext(ctx)
l.Log().Infoln("Starting agents...")
log.Infoln("Starting agents...")
for _, agentNode := range agents {
currentAgentNode := agentNode
agentWG.Go(func() error {
return NodeStart(aCtx, runtime, currentAgentNode, &k3d.NodeStartOpts{
Wait: true,
NodeHooks: clusterStartOpts.NodeHooks,
EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
})
return NodeStart(aCtx, runtime, currentAgentNode, nodeStartOpts)
})
}
if err := agentWG.Wait(); err != nil {
@ -912,14 +912,13 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
*/
helperWG, hCtx := errgroup.WithContext(ctx)
l.Log().Infoln("Starting helpers...")
log.Infoln("Starting helpers...")
for _, helperNode := range aux {
currentHelperNode := helperNode
helperWG.Go(func() error {
nodeStartOpts := &k3d.NodeStartOpts{
NodeHooks: currentHelperNode.HookActions,
EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
nodeStartOpts := k3d.NodeStartOpts{
NodeHooks: currentHelperNode.HookActions,
}
if currentHelperNode.Role == k3d.LoadBalancerRole {
nodeStartOpts.Wait = true
@ -933,96 +932,17 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
return fmt.Errorf("Failed to add one or more helper nodes: %w", err)
}
/*
* Additional Cluster Preparation (post start)
*/
postStartErrgrp, postStartErrgrpCtx := errgroup.WithContext(ctx)
/*** DNS ***/
// add host.k3d.internal record to /etc/hosts in all nodes
postStartErrgrp.Go(func() error {
return prepInjectHostIP(postStartErrgrpCtx, runtime, cluster, &clusterStartOpts)
})
postStartErrgrp.Go(func() error {
hosts := fmt.Sprintf("%s %s\n", clusterStartOpts.EnvironmentInfo.HostGateway.String(), k3d.DefaultK3dInternalHostRecord)
net, err := runtime.GetNetwork(ctx, &cluster.Network)
if err != nil {
return fmt.Errorf("failed to get cluster network %s to inject host records into CoreDNS: %w", cluster.Network.Name, err)
}
for _, member := range net.Members {
hosts += fmt.Sprintf("%s %s\n", member.IP.String(), member.Name)
}
l.Log().Infof("Injecting records for host.k3d.internal and for %d network members into CoreDNS configmap...", len(net.Members))
act := actions.RewriteFileAction{
Runtime: runtime,
Path: "/var/lib/rancher/k3s/server/manifests/coredns.yaml",
Mode: 0744,
RewriteFunc: func(input []byte) ([]byte, error) {
split, err := util.SplitYAML(input)
if err != nil {
return nil, fmt.Errorf("error splitting yaml: %w", err)
}
var outputBuf bytes.Buffer
outputEncoder := yaml.NewEncoder(&outputBuf)
for _, d := range split {
var doc map[string]interface{}
if err := yaml.Unmarshal(d, &doc); err != nil {
return nil, err
}
if kind, ok := doc["kind"]; ok {
if strings.ToLower(kind.(string)) == "configmap" {
configmapData := doc["data"].(map[interface{}]interface{})
configmapData["NodeHosts"] = hosts
}
}
if err := outputEncoder.Encode(doc); err != nil {
return nil, err
}
}
outputEncoder.Close()
return outputBuf.Bytes(), nil
},
}
// get the first server in the list and run action on it once it's ready for it
for _, n := range cluster.Nodes {
if n.Role == k3d.ServerRole {
ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", n.State.Started)
if err != nil {
return err
}
if err := NodeWaitForLogMessage(ctx, runtime, n, "Cluster dns configmap", ts.Truncate(time.Second)); err != nil {
return err
}
return act.Run(ctx, n)
}
}
return nil
})
if err := postStartErrgrp.Wait(); err != nil {
return fmt.Errorf("error during post-start cluster preparation: %w", err)
}
return nil
}
// ClusterStop stops a whole cluster (i.e. all nodes of the cluster)
func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
l.Log().Infof("Stopping cluster '%s'", cluster.Name)
log.Infof("Stopping cluster '%s'", cluster.Name)
failed := 0
for _, node := range cluster.Nodes {
if err := runtime.StopNode(ctx, node); err != nil {
l.Log().Warningf("Failed to stop node '%s': Try to stop it manually", node.Name)
log.Warningf("Failed to stop node '%s': Try to stop it manually", node.Name)
failed++
continue
}
@ -1031,8 +951,6 @@ func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluste
if failed > 0 {
return fmt.Errorf("Failed to stop %d nodes: Try to stop them manually", failed)
}
l.Log().Infof("Stopped cluster '%s'", cluster.Name)
return nil
}
@ -1044,85 +962,60 @@ func SortClusters(clusters []*k3d.Cluster) []*k3d.Cluster {
return clusters
}
// corednsAddHost adds a host entry to the CoreDNS configmap if it doesn't exist (a host entry is a single line of the form "IP HOST")
func corednsAddHost(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, ip string, name string) error {
retries := 3
if v, ok := os.LookupEnv(k3d.K3dEnvDebugCorednsRetries); ok && v != "" {
l.Log().Debugf("Running with %s=%s", k3d.K3dEnvDebugCorednsRetries, v)
if r, err := strconv.Atoi(v); err == nil {
retries = r
} else {
return fmt.Errorf("Invalid value set for env var %s (%s): %w", k3d.K3dEnvDebugCorednsRetries, v, err)
}
}
// select any server node
var node *k3d.Node
for _, n := range cluster.Nodes {
if n.Role == k3d.ServerRole {
node = n
}
}
hostsEntry := fmt.Sprintf("%s %s", ip, name)
patchCmd := `patch=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s` + name + `$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$patch"`
successInjectCoreDNSEntry := false
// try 3 (or K3D_DEBUG_COREDNS_RETRIES value) times, as e.g. on cluster startup it may take some time for the Configmap to be available and the server to be responsive
for i := 0; i < retries; i++ {
l.Log().Debugf("Running CoreDNS patch in node %s to add %s (try %d/%d)...", node.Name, hostsEntry, i, retries)
logreader, err := runtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", patchCmd})
if err == nil {
successInjectCoreDNSEntry = true
break
} else {
msg := fmt.Sprintf("(try %d/%d) error patching the CoreDNS ConfigMap to include entry '%s': %+v", i, retries, hostsEntry, err)
if logreader != nil {
readlogs, err := io.ReadAll(logreader)
if err != nil {
l.Log().Debugf("(try %d/%d) error reading the logs from failed CoreDNS patch exec process in node %s: %v", i, retries, node.Name, err)
} else {
msg += fmt.Sprintf("\nLogs: %s", string(readlogs))
}
} else {
l.Log().Debugf("(try %d/%d) error reading the logs from failed CoreDNS patch exec process in node %s: no logreader returned for exec process", i, retries, node.Name)
}
l.Log().Debugln(msg)
time.Sleep(1 * time.Second)
}
}
if !successInjectCoreDNSEntry {
return fmt.Errorf("failed to patch CoreDNS ConfigMap to include entry '%s' (%d tries, see debug logs)", hostsEntry, retries)
}
l.Log().Debugf("Successfully patched CoreDNS Configmap with record '%s'", hostsEntry)
return nil
}
// prepInjectHostIP adds /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterStartOpts *k3d.ClusterStartOpts) error {
if cluster.Network.Name == "host" {
l.Log().Tracef("Not injecting hostIP as clusternetwork is 'host'")
return nil
func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) {
log.Infoln("(Optional) Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access")
hostIP, err := GetHostIP(ctx, runtime, cluster)
if err != nil {
log.Warnf("Failed to get HostIP: %+v", err)
}
if hostIP != nil {
hostRecordSuccessMessage := ""
etcHostsFailureCount := 0
hostsEntry := fmt.Sprintf("%s %s", hostIP, k3d.DefaultK3dInternalHostRecord)
log.Debugf("Adding extra host entry '%s'...", hostsEntry)
for _, node := range cluster.Nodes {
if err := runtime.ExecInNode(ctx, node, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)}); err != nil {
log.Warnf("Failed to add extra entry '%s' to /etc/hosts in node '%s'", hostsEntry, node.Name)
etcHostsFailureCount++
}
}
if etcHostsFailureCount < len(cluster.Nodes) {
hostRecordSuccessMessage += fmt.Sprintf("Successfully added host record to /etc/hosts in %d/%d nodes", (len(cluster.Nodes) - etcHostsFailureCount), len(cluster.Nodes))
}
hostIP := clusterStartOpts.EnvironmentInfo.HostGateway
hostsEntry := fmt.Sprintf("%s %s", hostIP.String(), k3d.DefaultK3dInternalHostRecord)
l.Log().Infof("Injecting '%s' into /etc/hosts of all nodes...", hostsEntry)
patchCmd := `patch=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$patch"`
successInjectCoreDNSEntry := false
for _, node := range cluster.Nodes {
if node.Role == k3d.AgentRole || node.Role == k3d.ServerRole {
logreader, err := runtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", patchCmd})
if err == nil {
successInjectCoreDNSEntry = true
break
} else {
msg := fmt.Sprintf("error patching the CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err)
readlogs, err := ioutil.ReadAll(logreader)
if err != nil {
log.Debugf("error reading the logs from failed CoreDNS patch exec process in node %s: %v", node.Name, err)
} else {
msg += fmt.Sprintf("\nLogs: %s", string(readlogs))
}
log.Debugln(msg)
}
}
}
if successInjectCoreDNSEntry == false {
log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s' (see debug logs)", hostsEntry)
} else {
hostRecordSuccessMessage += " and to the CoreDNS ConfigMap"
}
if hostRecordSuccessMessage != "" {
log.Infoln(hostRecordSuccessMessage)
}
// entry in /etc/hosts
errgrp, errgrpctx := errgroup.WithContext(ctx)
for _, node := range cluster.Nodes {
n := node
errgrp.Go(func() error {
return runtime.ExecInNode(errgrpctx, n, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)})
})
}
if err := errgrp.Wait(); err != nil {
return fmt.Errorf("failed to add hosts entry %s: %w", hostsEntry, err)
}
l.Log().Debugf("Successfully added host record \"%s\" to /etc/hosts in all nodes", hostsEntry)
return nil
}
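For illustration, a minimal standalone sketch of the parallel /etc/hosts injection used in the new code above; execFunc is a hypothetical stand-in for runtime.ExecInNode so the example stays self-contained, and the node names and hosts entry are made up.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// execFunc stands in for runtime.ExecInNode (assumption for this sketch).
type execFunc func(ctx context.Context, nodeName string, cmd []string) error

// injectHostsEntry appends one hosts entry on every node in parallel and
// returns the first error, mirroring the errgroup fan-out above.
func injectHostsEntry(ctx context.Context, nodes []string, hostsEntry string, exec execFunc) error {
	errgrp, errgrpctx := errgroup.WithContext(ctx)
	for _, node := range nodes {
		n := node // capture the loop variable for the goroutine
		errgrp.Go(func() error {
			return exec(errgrpctx, n, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)})
		})
	}
	if err := errgrp.Wait(); err != nil {
		return fmt.Errorf("failed to add hosts entry %s: %w", hostsEntry, err)
	}
	return nil
}

func main() {
	fakeExec := func(ctx context.Context, nodeName string, cmd []string) error {
		fmt.Printf("exec on %s: %v\n", nodeName, cmd)
		return nil
	}
	nodes := []string{"k3d-demo-server-0", "k3d-demo-agent-0"}
	if err := injectHostsEntry(context.Background(), nodes, "172.24.0.1 host.k3d.internal", fakeExec); err != nil {
		panic(err)
	}
}

Because errgroup.WithContext cancels the shared context on the first failure, a single node error aborts the remaining injections and Wait returns that error once.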
func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
@ -1134,12 +1027,12 @@ func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt.
success = true
break
} else {
l.Log().Debugf("Failed to create LocalRegistryHosting ConfigMap in node %s: %+v", node.Name, err)
log.Debugf("Failed to create LocalRegistryHosting ConfigMap in node %s: %+v", node.Name, err)
}
}
}
if success == false {
l.Log().Warnf("Failed to create LocalRegistryHosting ConfigMap")
log.Warnf("Failed to create LocalRegistryHosting ConfigMap")
}
return nil
}
@ -1176,7 +1069,7 @@ func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, clus
for _, portWithNodeFilters := range changeset.Ports {
filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters)
if err != nil {
return fmt.Errorf("failed to filter nodes: %w", err)
return err
}
for suffix := range filteredNodes {
@ -1196,12 +1089,12 @@ func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, clus
}
}
l.Log().Debugf("ORIGINAL:\n> Ports: %+v\n> Config: %+v\nCHANGESET:\n> Ports: %+v\n> Config: %+v", existingLB.Node.Ports, existingLB.Config, lbChangeset.Node.Ports, lbChangeset.Config)
log.Debugf("ORIGINAL:\n> Ports: %+v\n> Config: %+v\nCHANGESET:\n> Ports: %+v\n> Config: %+v", existingLB.Node.Ports, existingLB.Config, lbChangeset.Node.Ports, lbChangeset.Config)
// prepare to write config to lb container
configyaml, err := yaml.Marshal(lbChangeset.Config)
if err != nil {
return fmt.Errorf("failed to marshal loadbalancer config changeset: %w", err)
return err
}
writeLbConfigAction := k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,

@ -24,7 +24,7 @@ package client
import (
"fmt"
"github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/pkg/types"
)
// CheckName ensures that a cluster name is also a valid host name according to RFC 1123.

@ -1,62 +0,0 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package client
import (
"context"
"fmt"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
)
func GatherEnvironmentInfo(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*k3d.EnvironmentInfo, error) {
envInfo := &k3d.EnvironmentInfo{}
rtimeInfo, err := runtime.Info()
if err != nil {
return nil, err
}
envInfo.RuntimeInfo = *rtimeInfo
l.Log().Infof("Using the k3d-tools node to gather environment information")
toolsNode, err := EnsureToolsNode(ctx, runtime, cluster)
if err != nil {
return nil, err
}
defer func() {
go NodeDelete(ctx, runtime, toolsNode, k3d.NodeDeleteOpts{SkipLBUpdate: true})
}()
hostIP, err := GetHostIP(ctx, runtime, cluster)
if err != nil {
return envInfo, fmt.Errorf("failed to get host IP: %w", err)
}
envInfo.HostGateway = hostIP
return envInfo, nil
}

@ -25,28 +25,28 @@ import (
"os"
"strconv"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/types/fixes"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/types/fixes"
log "github.com/sirupsen/logrus"
)
// FIXME: FixCgroupV2 - to be removed when fixed upstream
func EnableCgroupV2FixIfNeeded(runtime runtimes.Runtime) {
if _, isSet := os.LookupEnv(string(fixes.EnvFixCgroupV2)); !isSet {
if _, isSet := os.LookupEnv(fixes.EnvFixCgroupV2); !isSet {
runtimeInfo, err := runtime.Info()
if err != nil {
l.Log().Warnf("Failed to get runtime information: %+v", err)
log.Warnf("Failed to get runtime information: %+v", err)
return
}
cgroupVersion, err := strconv.Atoi(runtimeInfo.CgroupVersion)
if err != nil {
l.Log().Debugf("Failed to parse cgroupVersion: %+v", err)
log.Debugf("Failed to parse cgroupVersion: %+v", err)
return
}
if cgroupVersion == 2 {
l.Log().Debugf("Detected CgroupV2, enabling custom entrypoint (disable by setting %s=false)", fixes.EnvFixCgroupV2)
if err := os.Setenv(string(fixes.EnvFixCgroupV2), "true"); err != nil {
l.Log().Errorf("Detected CgroupsV2 but failed to enable k3d's hotfix (try `export %s=true`): %+v", fixes.EnvFixCgroupV2, err)
log.Debugf("Detected CgroupV2, enabling custom entrypoint (disable by setting %s=false)", fixes.EnvFixCgroupV2)
if err := os.Setenv(fixes.EnvFixCgroupV2, "true"); err != nil {
log.Errorf("Detected CgroupsV2 but failed to enable k3d's hotfix (try `export %s=true`): %+v", fixes.EnvFixCgroupV2, err)
}
}
}
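A minimal sketch of the auto-enable check above; the literal "K3D_FIX_CGROUPV2" stands in for fixes.EnvFixCgroupV2 and the hard-coded version string replaces the runtime info lookup, both assumptions made to keep the example standalone.

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Respect an explicit user choice: do nothing if the variable is already set.
	if _, isSet := os.LookupEnv("K3D_FIX_CGROUPV2"); isSet {
		return
	}
	// Stand-in for runtimeInfo.CgroupVersion as reported by the container runtime.
	cgroupVersion, err := strconv.Atoi("2")
	if err != nil {
		return
	}
	if cgroupVersion == 2 {
		fmt.Println("detected cgroup v2, enabling the custom entrypoint fix")
		_ = os.Setenv("K3D_FIX_CGROUPV2", "true")
	}
}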

@ -27,74 +27,45 @@ import (
"fmt"
"net"
"regexp"
goruntime "runtime"
"strings"
"runtime"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/util"
rt "github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
)
type ResolveHostCmd struct {
Cmd string
LogMatcher *regexp.Regexp
}
var (
ResolveHostCmdNSLookup = ResolveHostCmd{
Cmd: "nslookup %s",
LogMatcher: regexp.MustCompile(`^Address:\s+(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$`),
}
ResolveHostCmdGetEnt = ResolveHostCmd{
Cmd: "getent ahostsv4 '%s'",
LogMatcher: regexp.MustCompile(`(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+STREAM.+`), // e.g. `192.168.47.4 STREAM host.docker.internal`,
}
)
var nsLookupAddressRegexp = regexp.MustCompile(`^Address:\s+(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$`)
// GetHostIP returns the routable IP address to be able to access services running on the host system from inside the cluster.
// This depends on the Operating System and the chosen Runtime.
func GetHostIP(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (net.IP, error) {
rtimeInfo, err := runtime.Info()
if err != nil {
return nil, err
}
l.Log().Tracef("GOOS: %s / Runtime OS: %s (%s)", goruntime.GOOS, rtimeInfo.OSType, rtimeInfo.OS)
isDockerDesktop := func(os string) bool {
return strings.ToLower(os) == "docker desktop"
}
func GetHostIP(ctx context.Context, rtime rt.Runtime, cluster *k3d.Cluster) (net.IP, error) {
// Docker Runtime
if runtime == runtimes.Docker {
if rtime == rt.Docker {
log.Tracef("Runtime GOOS: %s", runtime.GOOS)
// "native" Docker on Linux
if runtime.GOOS == "linux" {
ip, err := rtime.GetHostIP(ctx, cluster.Network.Name)
if err != nil {
return nil, err
}
return ip, nil
}
// Docker (for Desktop) on MacOS or Windows
if isDockerDesktop(rtimeInfo.OS) {
toolsNode, err := EnsureToolsNode(ctx, runtime, cluster)
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
ip, err := resolveHostnameFromInside(ctx, rtime, cluster.Nodes[0], "host.docker.internal")
if err != nil {
return nil, fmt.Errorf("failed to ensure that k3d-tools node is running to get host IP :%w", err)
return nil, err
}
ip, err := resolveHostnameFromInside(ctx, runtime, toolsNode, "host.docker.internal", ResolveHostCmdGetEnt)
if err == nil {
return ip, nil
}
l.Log().Warnf("failed to resolve 'host.docker.internal' from inside the k3d-tools node: %v", err)
return ip, nil
}
l.Log().Infof("HostIP: using network gateway...")
ip, err := runtime.GetHostIP(ctx, cluster.Network.Name)
if err != nil {
return nil, fmt.Errorf("runtime failed to get host IP: %w", err)
}
return ip, nil
// Catch all other GOOS cases
return nil, fmt.Errorf("GetHostIP only implemented for Linux, MacOS (Darwin) and Windows")
}
@ -103,9 +74,9 @@ func GetHostIP(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Clust
}
func resolveHostnameFromInside(ctx context.Context, rtime runtimes.Runtime, node *k3d.Node, hostname string, cmd ResolveHostCmd) (net.IP, error) {
func resolveHostnameFromInside(ctx context.Context, rtime rt.Runtime, node *k3d.Node, hostname string) (net.IP, error) {
logreader, execErr := rtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", fmt.Sprintf(cmd.Cmd, hostname)})
logreader, execErr := rtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", fmt.Sprintf("nslookup %s", hostname)})
if logreader == nil {
if execErr != nil {
@ -123,28 +94,28 @@ func resolveHostnameFromInside(ctx context.Context, rtime runtimes.Runtime, node
return nil, fmt.Errorf("Failed to scan logs for host IP: Could not create scanner from logreader")
}
if scanner != nil && execErr != nil {
l.Log().Debugln("Exec Process Failed, but we still got logs, so we're at least trying to get the IP from there...")
l.Log().Tracef("-> Exec Process Error was: %+v", execErr)
log.Debugln("Exec Process Failed, but we still got logs, so we're at least trying to get the IP from there...")
log.Tracef("-> Exec Process Error was: %+v", execErr)
}
for scanner.Scan() {
l.Log().Tracef("Scanning Log Line '%s'", scanner.Text())
match := cmd.LogMatcher.FindStringSubmatch(scanner.Text())
log.Tracef("Scanning Log Line '%s'", scanner.Text())
match := nsLookupAddressRegexp.FindStringSubmatch(scanner.Text())
if len(match) == 0 {
continue
}
l.Log().Tracef("-> Match(es): '%+v'", match)
submatches = util.MapSubexpNames(cmd.LogMatcher.SubexpNames(), match)
l.Log().Tracef(" -> Submatch(es): %+v", submatches)
log.Tracef("-> Match(es): '%+v'", match)
submatches = util.MapSubexpNames(nsLookupAddressRegexp.SubexpNames(), match)
log.Tracef(" -> Submatch(es): %+v", submatches)
break
}
if _, ok := submatches["ip"]; !ok {
if execErr != nil {
l.Log().Errorln(execErr)
log.Errorln(execErr)
}
return nil, fmt.Errorf("Failed to read address for '%s' from command output", hostname)
return nil, fmt.Errorf("Failed to read address for '%s' from nslookup response", hostname)
}
l.Log().Debugf("Hostname '%s' -> Address '%s'", hostname, submatches["ip"])
log.Debugf("Hostname '%s' -> Address '%s'", hostname, submatches["ip"])
return net.ParseIP(submatches["ip"]), nil
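For illustration, a standalone sketch of how the getent-based matcher above extracts the host IP; the sample line is an assumption of what `getent ahostsv4 host.docker.internal` typically prints inside a container.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as ResolveHostCmdGetEnt above.
var getentMatcher = regexp.MustCompile(`(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+STREAM.+`)

func main() {
	line := "192.168.65.2      STREAM host.docker.internal" // illustrative output line
	if match := getentMatcher.FindStringSubmatch(line); match != nil {
		fmt.Println("resolved IP:", match[1]) // first capture group is the 'ip' subexpression
	}
}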

@ -23,20 +23,18 @@ package client
import (
"context"
"fmt"
l "github.com/rancher/k3d/v5/pkg/logger"
k3drt "github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"inet.af/netaddr"
)
// GetIP checks a given network for a free IP and returns it, if possible
func GetIP(ctx context.Context, runtime k3drt.Runtime, network *k3d.ClusterNetwork) (netaddr.IP, error) {
network, err := runtime.GetNetwork(ctx, network)
if err != nil {
return netaddr.IP{}, fmt.Errorf("runtime failed to get network '%s': %w", network.Name, err)
return netaddr.IP{}, err
}
var ipsetbuilder netaddr.IPSetBuilder
@ -48,17 +46,14 @@ func GetIP(ctx context.Context, runtime k3drt.Runtime, network *k3d.ClusterNetwo
}
// exclude first and last address
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().From())
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().To())
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().From)
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().To)
ipset, err := ipsetbuilder.IPSet()
if err != nil {
return netaddr.IP{}, err
}
ipset := ipsetbuilder.IPSet()
ip := ipset.Ranges()[0].From()
ip := ipset.Ranges()[0].From
l.Log().Debugf("Found free IP %s in network %s", ip.String(), network.Name)
log.Debugf("Found free IP %s in network %s", ip.String(), network.Name)
return ip, nil
}
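A minimal sketch of the free-IP selection above, assuming the inet.af/netaddr API used on the new side of the diff; unlike GetIP it does not exclude addresses already assigned to containers, and the prefix is made up.

package main

import (
	"fmt"

	"inet.af/netaddr"
)

func main() {
	prefix := netaddr.MustParseIPPrefix("172.28.0.0/16")

	var builder netaddr.IPSetBuilder
	builder.AddPrefix(prefix)
	// exclude first and last address, as above
	builder.Remove(prefix.Range().From())
	builder.Remove(prefix.Range().To())

	ipset, err := builder.IPSet()
	if err != nil {
		panic(err)
	}
	fmt.Println("first free IP:", ipset.Ranges()[0].From()) // 172.28.0.1
}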

@ -25,14 +25,14 @@ import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@ -53,14 +53,14 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *
// get kubeconfig from cluster node
kubeconfig, err := KubeconfigGet(ctx, runtime, cluster)
if err != nil {
return output, fmt.Errorf("failed to get kubeconfig for cluster '%s': %w", cluster.Name, err)
return output, err
}
// empty output parameter = write to default
if output == "" {
output, err = KubeconfigGetDefaultPath()
if err != nil {
return output, fmt.Errorf("failed to get default kubeconfig path: %w", err)
return output, err
}
}
@ -78,17 +78,19 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *
// the output file does not exist: try to create it and try again
if os.IsNotExist(err) && firstRun {
l.Log().Debugf("Output path '%s' doesn't exist, trying to create it...", output)
log.Debugf("Output path '%s' doesn't exist, trying to create it...", output)
// create directory path
if err := os.MkdirAll(filepath.Dir(output), 0755); err != nil {
return output, fmt.Errorf("failed to create output directory '%s': %w", filepath.Dir(output), err)
log.Errorf("Failed to create output directory '%s'", filepath.Dir(output))
return output, err
}
// try create output file
f, err := os.Create(output)
if err != nil {
return output, fmt.Errorf("failed to create output file '%s': %w", output, err)
log.Errorf("Failed to create output file '%s'", output)
return output, err
}
f.Close()
@ -96,7 +98,8 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *
firstRun = false
continue
}
return output, fmt.Errorf("failed to open output file '%s' or it's not a kubeconfig: %w", output, err)
log.Errorf("Failed to open output file '%s' or it's not a KubeConfig", output)
return output, err
}
break
}
@ -114,10 +117,11 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// TODO: getKubeconfig: we should make sure, that the server node we're trying to fetch from is actually running
serverNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.ServerRole)})
if err != nil {
return nil, fmt.Errorf("runtime failed to get server nodes for cluster '%s': %w", cluster.Name, err)
log.Errorln("Failed to get server nodes")
return nil, err
}
if len(serverNodes) == 0 {
return nil, fmt.Errorf("didn't find any server node for cluster '%s'", cluster.Name)
return nil, fmt.Errorf("Didn't find any server node")
}
// prefer a server node, which actually has the port exposed
@ -143,13 +147,15 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// get the kubeconfig from the first server node
reader, err := runtime.GetKubeconfig(ctx, chosenServer)
if err != nil {
return nil, fmt.Errorf("runtime failed to pull kubeconfig from node '%s': %w", chosenServer.Name, err)
log.Errorf("Failed to get kubeconfig from node '%s'", chosenServer.Name)
return nil, err
}
defer reader.Close()
readBytes, err := io.ReadAll(reader)
readBytes, err := ioutil.ReadAll(reader)
if err != nil {
return nil, fmt.Errorf("failed to read kubeconfig file: %w", err)
log.Errorln("Couldn't read kubeconfig file")
return nil, err
}
// drop the first 512 bytes which contain file metadata/control characters
@ -161,7 +167,8 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
*/
kc, err := clientcmd.Load(trimBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse kubeconfig: %w", err)
log.Errorln("Failed to parse the KubeConfig")
return nil, err
}
// update the server URL
@ -189,7 +196,7 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// set current-context to new context name
kc.CurrentContext = newContextName
l.Log().Tracef("Modified Kubeconfig: %+v", kc)
log.Tracef("Modified Kubeconfig: %+v", kc)
return kc, nil
}
@ -205,22 +212,25 @@ func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,
} else {
output, err = os.Create(path)
if err != nil {
return fmt.Errorf("failed to create file '%s': %w", path, err)
log.Errorf("Failed to create file '%s'", path)
return err
}
defer output.Close()
}
kubeconfigBytes, err := clientcmd.Write(*kubeconfig)
if err != nil {
return fmt.Errorf("failed to write kubeconfig: %w", err)
log.Errorln("Failed to write KubeConfig")
return err
}
_, err = output.Write(kubeconfigBytes)
if err != nil {
return fmt.Errorf("failed to write file '%s': %w", output.Name(), err)
log.Errorf("Failed to write to file '%s'", output.Name())
return err
}
l.Log().Debugf("Wrote kubeconfig to '%s'", output.Name())
log.Debugf("Wrote kubeconfig to '%s'", output.Name())
return nil
@ -229,7 +239,7 @@ func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,
// KubeconfigMerge merges a new kubeconfig into an existing kubeconfig and returns the result
func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {
l.Log().Tracef("Merging new Kubeconfig:\n%+v\n>>> into existing Kubeconfig:\n%+v", newKubeConfig, existingKubeConfig)
log.Tracef("Merging new Kubeconfig:\n%+v\n>>> into existing Kubeconfig:\n%+v", newKubeConfig, existingKubeConfig)
// Overwrite values in existing kubeconfig
for k, v := range newKubeConfig.Clusters {
@ -264,7 +274,7 @@ func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, ex
updateCurrentContext = true
}
if updateCurrentContext {
l.Log().Debugf("Setting new current-context '%s'", newKubeConfig.CurrentContext)
log.Debugf("Setting new current-context '%s'", newKubeConfig.CurrentContext)
existingKubeConfig.CurrentContext = newKubeConfig.CurrentContext
}
@ -275,15 +285,17 @@ func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, ex
func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
tempPath := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
if err := clientcmd.WriteToFile(*kubeconfig, tempPath); err != nil {
return fmt.Errorf("failed to write merged kubeconfig to temporary file '%s': %w", tempPath, err)
log.Errorf("Failed to write merged kubeconfig to temporary file '%s'", tempPath)
return err
}
// Move temporary file over existing KubeConfig
if err := os.Rename(tempPath, path); err != nil {
return fmt.Errorf("failed to overwrite existing KubeConfig '%s' with new kubeconfig '%s': %w", path, tempPath, err)
log.Errorf("Failed to overwrite existing KubeConfig '%s' with new KubeConfig '%s'", path, tempPath)
return err
}
l.Log().Debugf("Wrote kubeconfig to '%s'", path)
log.Debugf("Wrote kubeconfig to '%s'", path)
return nil
}
@ -292,9 +304,9 @@ func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path
func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
path, err := KubeconfigGetDefaultPath()
if err != nil {
return nil, fmt.Errorf("failed to get default kubeconfig path: %w", err)
return nil, err
}
l.Log().Debugf("Using default kubeconfig '%s'", path)
log.Debugf("Using default kubeconfig '%s'", path)
return clientcmd.LoadFromFile(path)
}
@ -302,7 +314,7 @@ func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
func KubeconfigGetDefaultPath() (string, error) {
defaultKubeConfigLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
if len(defaultKubeConfigLoadingRules.GetLoadingPrecedence()) > 1 {
return "", fmt.Errorf("multiple kubeconfigs specified via KUBECONFIG env var: Please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
return "", fmt.Errorf("Multiple kubeconfigs specified via KUBECONFIG env var: Please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
}
return defaultKubeConfigLoadingRules.GetDefaultFilename(), nil
}
@ -311,11 +323,11 @@ func KubeconfigGetDefaultPath() (string, error) {
func KubeconfigRemoveClusterFromDefaultConfig(ctx context.Context, cluster *k3d.Cluster) error {
defaultKubeConfigPath, err := KubeconfigGetDefaultPath()
if err != nil {
return fmt.Errorf("failed to get default kubeconfig path: %w", err)
return err
}
kubeconfig, err := KubeconfigGetDefaultFile()
if err != nil {
return fmt.Errorf("failed to get default kubeconfig file: %w", err)
return err
}
kubeconfig = KubeconfigRemoveCluster(ctx, cluster, kubeconfig)
return KubeconfigWrite(ctx, kubeconfig, defaultKubeConfigPath)
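For illustration, a small sketch of the default-path and load flow above using client-go's clientcmd; it only reads and prints the current context and leaves merging and writing aside.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Resolve the default kubeconfig location, taking KUBECONFIG into account.
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	path := rules.GetDefaultFilename()

	// Load and inspect it; KubeconfigGet/KubeconfigMerge above work on the same *clientcmdapi.Config type.
	cfg, err := clientcmd.LoadFromFile(path)
	if err != nil {
		panic(err)
	}
	fmt.Printf("kubeconfig %s: current-context=%s\n", path, cfg.CurrentContext)
}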

@ -26,18 +26,15 @@ import (
"context"
"errors"
"fmt"
"io"
"strings"
"io/ioutil"
"time"
"github.com/docker/go-connections/nat"
"github.com/go-test/deep"
"github.com/imdario/mergo"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/spf13/viper"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
@ -54,7 +51,8 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
// update cluster details to ensure that we have the latest node list
cluster, err = ClusterGet(ctx, runtime, cluster)
if err != nil {
return fmt.Errorf("failed to update details for cluster '%s': %w", cluster.Name, err)
log.Errorf("Failed to update details for cluster '%s'", cluster.Name)
return err
}
currentConfig, err := GetLoadbalancerConfig(ctx, runtime, cluster)
@ -62,23 +60,23 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
return fmt.Errorf("error getting current config from loadbalancer: %w", err)
}
l.Log().Tracef("Current loadbalancer config:\n%+v", currentConfig)
log.Tracef("Current loadbalancer config:\n%+v", currentConfig)
newLBConfig, err := LoadbalancerGenerateConfig(cluster)
if err != nil {
return fmt.Errorf("error generating new loadbalancer config: %w", err)
}
l.Log().Tracef("New loadbalancer config:\n%+v", currentConfig)
log.Tracef("New loadbalancer config:\n%+v", currentConfig)
if diff := deep.Equal(currentConfig, newLBConfig); diff != nil {
l.Log().Debugf("Updating the loadbalancer with this diff: %+v", diff)
log.Debugf("Updating the loadbalancer with this diff: %+v", diff)
}
newLbConfigYaml, err := yaml.Marshal(&newLBConfig)
if err != nil {
return fmt.Errorf("error marshalling the new loadbalancer config: %w", err)
}
l.Log().Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
startTime := time.Now().Truncate(time.Second).UTC()
if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer.Node); err != nil {
return fmt.Errorf("error writing new loadbalancer config to container: %w", err)
@ -86,25 +84,25 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer successCtxCancel()
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.GetReadyLogMessage(cluster.ServerLoadBalancer.Node, k3d.IntentAny), startTime)
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer failureCtxCancel()
err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime)
if err != nil {
l.Log().Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
return ErrLBConfigFailedTest
} else {
l.Log().Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.")
log.Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.")
return ErrLBConfigHostNotFound
}
} else {
l.Log().Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err)
log.Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err)
return ErrLBConfigFailedTest
}
}
l.Log().Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)
log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)
time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits
@ -122,7 +120,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
var err error
cluster.ServerLoadBalancer.Node, err = NodeGet(ctx, runtime, node)
if err != nil {
return cfg, fmt.Errorf("failed to get loadbalancer node '%s': %w", node.Name, err)
return cfg, err
}
}
}
@ -130,13 +128,13 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node)
if err != nil {
return cfg, fmt.Errorf("runtime failed to read loadbalancer config '%s' from node '%s': %w", types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node.Name, err)
return cfg, err
}
defer reader.Close()
file, err := io.ReadAll(reader)
file, err := ioutil.ReadAll(reader)
if err != nil {
return cfg, fmt.Errorf("failed to read loadbalancer config file: %w", err)
return cfg, err
}
file = bytes.Trim(file[512:], "\x00") // trim control characters, etc.
@ -172,7 +170,7 @@ func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, e
}
// some additional nginx settings
lbConfig.Settings.WorkerConnections = k3d.DefaultLoadbalancerWorkerConnections + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers)
lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers)
return lbConfig, nil
}
@ -195,22 +193,6 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster
}
}
if opts != nil && opts.ConfigOverrides != nil && len(opts.ConfigOverrides) > 0 {
tmpViper := viper.New()
for _, override := range opts.ConfigOverrides {
kv := strings.SplitN(override, "=", 2)
l.Log().Tracef("Overriding LB config with %s...", kv)
tmpViper.Set(kv[0], kv[1])
}
lbConfigOverride := &k3d.LoadbalancerConfig{}
if err := tmpViper.Unmarshal(lbConfigOverride); err != nil {
return nil, fmt.Errorf("failed to unmarshal loadbalancer config override into loadbalancer config: %w", err)
}
if err := mergo.MergeWithOverwrite(cluster.ServerLoadBalancer.Config, lbConfigOverride); err != nil {
return nil, fmt.Errorf("failed to override loadbalancer config: %w", err)
}
}
// Create LB as a modified node with loadbalancerRole
lbNode := &k3d.Node{
Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
@ -231,7 +213,7 @@ func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, portmapping nat.
nodenames := []string{}
for _, node := range targetNodes {
if node.Role == k3d.LoadBalancerRole {
return fmt.Errorf("cannot add port config referencing the loadbalancer itself (loop)")
return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)")
}
nodenames = append(nodenames, node.Name)
}
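A minimal sketch of the compare-then-write pattern in UpdateLoadbalancerConfig above; lbConfig is an illustrative stand-in for k3d.LoadbalancerConfig and the port mappings are made up.

package main

import (
	"fmt"

	"github.com/go-test/deep"
	"gopkg.in/yaml.v2"
)

type lbConfig struct {
	Ports map[string][]string `yaml:"ports"`
}

func main() {
	current := lbConfig{Ports: map[string][]string{"6443.tcp": {"k3d-demo-server-0"}}}
	desired := lbConfig{Ports: map[string][]string{"6443.tcp": {"k3d-demo-server-0", "k3d-demo-server-1"}}}

	// deep.Equal returns a list of differences (nil if the configs match).
	if diff := deep.Equal(current, desired); diff != nil {
		fmt.Printf("updating loadbalancer, diff: %v\n", diff)
		out, err := yaml.Marshal(&desired)
		if err != nil {
			panic(err)
		}
		// In the real flow this YAML is written into the serverlb container
		// and the nginx logs are watched for the ready/error message.
		fmt.Printf("new config:\n%s", out)
	}
}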

@ -23,15 +23,13 @@ THE SOFTWARE.
package client
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"time"
@ -41,17 +39,14 @@ import (
"github.com/docker/go-connections/nat"
dockerunits "github.com/docker/go-units"
"github.com/imdario/mergo"
"github.com/rancher/k3d/v5/pkg/actions"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/runtimes/docker"
runtimeTypes "github.com/rancher/k3d/v5/pkg/runtimes/types"
runtimeErrors "github.com/rancher/k3d/v5/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/types/fixes"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/pkg/util"
"github.com/rancher/k3d/v4/pkg/actions"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
runtimeErrors "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/fixes"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
@ -60,15 +55,12 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
targetClusterName := cluster.Name
cluster, err := ClusterGet(ctx, runtime, cluster)
if err != nil {
return fmt.Errorf("Failed to find specified cluster '%s': %w", targetClusterName, err)
log.Errorf("Failed to find specified cluster '%s'", targetClusterName)
return err
}
// networks: ensure that cluster network is on index 0
networks := []string{cluster.Network.Name}
if node.Networks != nil {
networks = append(networks, node.Networks...)
}
node.Networks = networks
// network
node.Networks = []string{cluster.Network.Name}
// skeleton
if node.RuntimeLabels == nil {
@ -88,7 +80,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
}
// if we didn't find a node with the same role in the cluster, just choose any other node
if srcNode == nil {
l.Log().Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
log.Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
node.Cmd = k3d.DefaultRoleCmds[node.Role]
for _, existingNode := range cluster.Nodes {
if existingNode.Role != k3d.LoadBalancerRole { // any role except for the LoadBalancer role
@ -113,7 +105,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
for _, forbiddenMount := range util.DoNotCopyVolumeSuffices {
for i, mount := range node.Volumes {
if strings.Contains(mount, forbiddenMount) {
l.Log().Tracef("Dropping copied volume mount %s to avoid issues...", mount)
log.Tracef("Dropping copied volume mount %s to avoid issues...", mount)
node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i)
}
}
@ -128,37 +120,37 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
for i, cmd := range srcNode.Cmd {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if cmd == forbiddenCmd {
l.Log().Tracef("Dropping '%s' from source node's cmd", forbiddenCmd)
log.Tracef("Dropping '%s' from source node's cmd", forbiddenCmd)
srcNode.Cmd = append(srcNode.Cmd[:i], srcNode.Cmd[i+1:]...)
}
}
for i, arg := range node.Args {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if arg == forbiddenCmd {
l.Log().Tracef("Dropping '%s' from source node's args", forbiddenCmd)
log.Tracef("Dropping '%s' from source node's args", forbiddenCmd)
srcNode.Args = append(srcNode.Args[:i], srcNode.Args[i+1:]...)
}
}
}
}
l.Log().Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name)
l.Log().Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node)
log.Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name)
log.Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node)
// fetch registry config
registryConfigBytes := []byte{}
registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, srcNode)
if err != nil {
if !errors.Is(err, runtimeErrors.ErrRuntimeFileNotFound) {
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
}
} else {
defer registryConfigReader.Close()
var err error
registryConfigBytes, err = io.ReadAll(registryConfigReader)
registryConfigBytes, err = ioutil.ReadAll(registryConfigReader)
if err != nil {
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
}
registryConfigReader.Close()
registryConfigBytes = bytes.Trim(registryConfigBytes[512:], "\x00") // trim control characters, etc.
@ -166,35 +158,28 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
// merge node config of new node into existing node config
if err := mergo.MergeWithOverwrite(srcNode, *node); err != nil {
return fmt.Errorf("failed to merge new node config into existing node config: %w", err)
log.Errorln("Failed to merge new node config into existing node config")
return err
}
node = srcNode
l.Log().Tracef("Resulting node %+v", node)
log.Debugf("Resulting node %+v", node)
k3sURLEnvFound := false
k3sTokenEnvFoundIndex := -1
for index, envVar := range node.Env {
if strings.HasPrefix(envVar, k3s.EnvClusterConnectURL) {
k3sURLEnvFound = true
}
if strings.HasPrefix(envVar, k3s.EnvClusterToken) {
k3sTokenEnvFoundIndex = index
k3sURLFound := false
for _, envVar := range node.Env {
if strings.HasPrefix(envVar, "K3S_URL") {
k3sURLFound = true
break
}
}
if !k3sURLEnvFound {
if !k3sURLFound {
if url, ok := node.RuntimeLabels[k3d.LabelClusterURL]; ok {
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, url))
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", url))
} else {
l.Log().Warnln("Failed to find K3S_URL value!")
log.Warnln("Failed to find K3S_URL value!")
}
}
if k3sTokenEnvFoundIndex != -1 && createNodeOpts.ClusterToken != "" {
l.Log().Debugln("Overriding copied cluster token with value from nodeCreateOpts...")
node.Env[k3sTokenEnvFoundIndex] = fmt.Sprintf("%s=%s", k3s.EnvClusterToken, createNodeOpts.ClusterToken)
node.RuntimeLabels[k3d.LabelClusterToken] = createNodeOpts.ClusterToken
}
// add node actions
if len(registryConfigBytes) != 0 {
@ -217,12 +202,12 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
node.State.Status = ""
if err := NodeRun(ctx, runtime, node, createNodeOpts); err != nil {
return fmt.Errorf("failed to run node '%s': %w", node.Name, err)
return err
}
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
l.Log().Infoln("Updating loadbalancer config to include new server node(s)")
log.Infoln("Updating loadbalancer config to include new server node(s)")
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
if !errors.Is(err, ErrLBConfigHostNotFound) {
return fmt.Errorf("error updating loadbalancer: %w", err)
@ -233,33 +218,6 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
return nil
}
func NodeAddToClusterRemote(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, clusterRef string, createNodeOpts k3d.NodeCreateOpts) error {
// runtime labels
if node.RuntimeLabels == nil {
node.RuntimeLabels = map[string]string{}
}
node.FillRuntimeLabels()
node.RuntimeLabels[k3d.LabelClusterName] = clusterRef
node.RuntimeLabels[k3d.LabelClusterURL] = clusterRef
node.RuntimeLabels[k3d.LabelClusterExternal] = "true"
node.RuntimeLabels[k3d.LabelClusterToken] = createNodeOpts.ClusterToken
if node.Env == nil {
node.Env = []string{}
}
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, clusterRef))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterToken, createNodeOpts.ClusterToken))
if err := NodeRun(ctx, runtime, node, createNodeOpts); err != nil {
return fmt.Errorf("failed to run node '%s': %w", node.Name, err)
}
return nil
}
// NodeAddToClusterMulti adds multiple nodes to a chosen cluster
func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
if createNodeOpts.Timeout > 0*time.Second {
@ -276,28 +234,7 @@ func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes
})
}
if err := nodeWaitGroup.Wait(); err != nil {
return fmt.Errorf("failed to add one or more nodes: %w", err)
}
return nil
}
func NodeAddToClusterMultiRemote(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, clusterRef string, createNodeOpts k3d.NodeCreateOpts) error {
if createNodeOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
defer cancel()
}
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
currentNode := node
nodeWaitGroup.Go(func() error {
return NodeAddToClusterRemote(ctx, runtime, currentNode, clusterRef, createNodeOpts)
})
}
if err := nodeWaitGroup.Wait(); err != nil {
return fmt.Errorf("failed to add one or more nodes: %w", err)
return fmt.Errorf("Failed to add one or more nodes: %w", err)
}
return nil
@ -314,24 +251,26 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
l.Log().Error(err)
log.Error(err)
}
if createNodeOpts.Wait {
currentNode := node
nodeWaitGroup.Go(func() error {
l.Log().Debugf("Starting to wait for node '%s'", currentNode.Name)
readyLogMessage := k3d.GetReadyLogMessage(currentNode, k3d.IntentNodeCreate)
log.Debugf("Starting to wait for node '%s'", currentNode.Name)
readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role]
if readyLogMessage != "" {
return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{})
}
l.Log().Warnf("NodeCreateMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
log.Warnf("NodeCreateMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
return nil
})
}
}
if err := nodeWaitGroup.Wait(); err != nil {
return fmt.Errorf("failed to create nodes: %w", err)
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorf(">>> %+v", err)
return fmt.Errorf("Failed to create nodes")
}
return nil
@ -340,145 +279,33 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
// NodeRun creates and starts a node
func NodeRun(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeCreateOpts k3d.NodeCreateOpts) error {
if err := NodeCreate(ctx, runtime, node, nodeCreateOpts); err != nil {
return fmt.Errorf("failed to create node '%s': %w", node.Name, err)
return err
}
if err := NodeStart(ctx, runtime, node, &k3d.NodeStartOpts{
Wait: nodeCreateOpts.Wait,
Timeout: nodeCreateOpts.Timeout,
NodeHooks: nodeCreateOpts.NodeHooks,
EnvironmentInfo: nodeCreateOpts.EnvironmentInfo,
Intent: k3d.IntentNodeCreate,
if err := NodeStart(ctx, runtime, node, k3d.NodeStartOpts{
Wait: nodeCreateOpts.Wait,
Timeout: nodeCreateOpts.Timeout,
NodeHooks: nodeCreateOpts.NodeHooks,
}); err != nil {
return fmt.Errorf("failed to start node '%s': %w", node.Name, err)
return err
}
return nil
}
// NodeStart starts an existing node
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts k3d.NodeStartOpts) error {
// return early, if the node is already running
if node.State.Running {
l.Log().Infof("Node %s is already running", node.Name)
log.Infof("Node %s is already running", node.Name)
return nil
}
if err := enableFixes(ctx, runtime, node, nodeStartOpts); err != nil {
return fmt.Errorf("failed to enable k3d fixes: %w", err)
}
startTime := time.Now()
l.Log().Debugf("Node %s Start Time: %+v", node.Name, startTime)
// execute lifecycle hook actions
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePreStart {
l.Log().Tracef("Node %s: Executing preStartAction '%s'", node.Name, reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
l.Log().Errorf("Node %s: Failed executing preStartAction '%+v': %+v", node.Name, hook, err)
}
}
}
// start the node
l.Log().Tracef("Starting node '%s'", node.Name)
if err := runtime.StartNode(ctx, node); err != nil {
return fmt.Errorf("runtime failed to start node '%s': %w", node.Name, err)
}
if node.State.Started != "" {
ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", node.State.Started)
if err != nil {
l.Log().Debugf("Failed to parse '%s.State.Started' timestamp '%s', falling back to calulated time", node.Name, node.State.Started)
}
startTime = ts.Truncate(time.Second)
l.Log().Debugf("Truncated %s to %s", ts, startTime)
}
if nodeStartOpts.Wait {
if nodeStartOpts.ReadyLogMessage == "" {
nodeStartOpts.ReadyLogMessage = k3d.GetReadyLogMessage(node, nodeStartOpts.Intent)
}
if nodeStartOpts.ReadyLogMessage != "" {
l.Log().Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
if err := NodeWaitForLogMessage(ctx, runtime, node, nodeStartOpts.ReadyLogMessage, startTime); err != nil {
return fmt.Errorf("Node %s failed to get ready: %+v", node.Name, err)
}
} else {
l.Log().Warnf("NodeStart: Set to wait for node %s to be ready, but there's no target log message defined", node.Name)
}
}
// execute lifecycle hook actions
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePostStart {
l.Log().Tracef("Node %s: Executing postStartAction '%s'", node.Name, reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
l.Log().Errorf("Node %s: Failed executing postStartAction '%+v': %+v", node.Name, hook, err)
}
}
}
return nil
}
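For illustration, a standalone sketch of the lifecycle-hook loop used in NodeStart above; the hook and stage types are simplified stand-ins for k3d.NodeHook and k3d.LifecycleStage.

package main

import (
	"context"
	"fmt"
)

type stage string

const preStart stage = "preStart"

type hook struct {
	Stage  stage
	Action func(ctx context.Context) error
}

// runHooks executes only the hooks matching the requested stage and, like the
// code above, logs failures instead of aborting the node start.
func runHooks(ctx context.Context, nodeName string, hooks []hook, s stage) {
	for _, h := range hooks {
		if h.Stage != s {
			continue
		}
		if err := h.Action(ctx); err != nil {
			fmt.Printf("Node %s: hook failed at stage %s: %v\n", nodeName, s, err)
		}
	}
}

func main() {
	hooks := []hook{
		{Stage: preStart, Action: func(ctx context.Context) error {
			fmt.Println("writing entrypoint file before start")
			return nil
		}},
	}
	runHooks(context.Background(), "k3d-demo-server-0", hooks, preStart)
}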
func enableFixes(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {
// FIXME: FixCgroupV2 - to be removed when fixed upstream
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
// FIXME: FixCgroupV2 - to be removed when fixed upstream
// auto-enable, if needed
EnableCgroupV2FixIfNeeded(runtime)
// early exit if we don't need any fix
if !fixes.FixEnabledAny() {
l.Log().Debugln("No fix enabled.")
return nil
}
// ensure nodehook list
if nodeStartOpts.NodeHooks == nil {
nodeStartOpts.NodeHooks = []k3d.NodeHook{}
}
// write umbrella entrypoint
nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: fixes.K3DEntrypoint,
Dest: "/bin/k3d-entrypoint.sh",
Mode: 0744,
},
})
// DNS Fix
if fixes.FixEnabled(fixes.EnvFixDNS) {
l.Log().Debugln(">>> enabling dns magic")
if nodeStartOpts.EnvironmentInfo == nil || nodeStartOpts.EnvironmentInfo.HostGateway == nil {
return fmt.Errorf("Cannot enable DNS fix, as Host Gateway IP is missing!")
}
data := []byte(strings.ReplaceAll(string(fixes.DNSMagicEntrypoint), "GATEWAY_IP", nodeStartOpts.EnvironmentInfo.HostGateway.String()))
nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: data,
Dest: "/bin/k3d-entrypoint-dns.sh",
Mode: 0744,
},
})
}
// CGroupsV2Fix
if fixes.FixEnabled(fixes.EnvFixCgroupV2) {
l.Log().Debugf(">>> enabling cgroupsv2 magic")
if fixes.FixCgroupV2Enabled() {
if nodeStartOpts.NodeHooks == nil {
nodeStartOpts.NodeHooks = []k3d.NodeHook{}
@ -489,12 +316,57 @@ func enableFixes(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: fixes.CgroupV2Entrypoint,
Dest: "/bin/k3d-entrypoint-cgroupv2.sh",
Dest: "/bin/entrypoint.sh",
Mode: 0744,
},
})
}
}
startTime := time.Now()
log.Debugf("Node %s Start Time: %+v", node.Name, startTime)
// execute lifecycle hook actions
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePreStart {
log.Tracef("Node %s: Executing preStartAction '%s'", node.Name, reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
log.Errorf("Node %s: Failed executing preStartAction '%+v': %+v", node.Name, hook, err)
}
}
}
// start the node
log.Tracef("Starting node '%s'", node.Name)
if err := runtime.StartNode(ctx, node); err != nil {
log.Errorf("Failed to start node '%s'", node.Name)
return err
}
if node.State.Started != "" {
ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", node.State.Started)
if err != nil {
log.Debugf("Failed to parse '%s.State.Started' timestamp '%s', falling back to calulated time", node.Name, node.State.Started)
}
startTime = ts.Truncate(time.Second)
log.Debugf("Truncated %s to %s", ts, startTime)
}
if nodeStartOpts.Wait {
if nodeStartOpts.ReadyLogMessage == "" {
nodeStartOpts.ReadyLogMessage = k3d.ReadyLogMessageByRole[node.Role]
}
if nodeStartOpts.ReadyLogMessage != "" {
log.Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
if err := NodeWaitForLogMessage(ctx, runtime, node, nodeStartOpts.ReadyLogMessage, startTime); err != nil {
return fmt.Errorf("Node %s failed to get ready: %+v", node.Name, err)
}
} else {
log.Warnf("NodeStart: Set to wait for node %s to be ready, but there's no target log message defined", node.Name)
}
}
return nil
}
@ -502,7 +374,7 @@ func enableFixes(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node,
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
// FIXME: FixCgroupV2 - to be removed when fixed upstream
EnableCgroupV2FixIfNeeded(runtime)
l.Log().Tracef("Creating node from spec\n%+v", node)
log.Tracef("Creating node from spec\n%+v", node)
/*
* CONFIGURATION
@ -523,39 +395,39 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
// specify options depending on node role
if node.Role == k3d.AgentRole { // TODO: check here AND in CLI or only here?
if err := patchAgentSpec(node); err != nil {
return fmt.Errorf("failed to patch agent spec on node %s: %w", node.Name, err)
return err
}
} else if node.Role == k3d.ServerRole {
if err := patchServerSpec(node, runtime); err != nil {
return fmt.Errorf("failed to patch server spec on node %s: %w", node.Name, err)
return err
}
}
// memory limits
if node.Memory != "" {
if runtime != runtimes.Docker {
l.Log().Warn("ignoring specified memory limits as runtime is not Docker")
log.Warn("ignoring specified memory limits as runtime is not Docker")
} else {
memory, err := dockerunits.RAMInBytes(node.Memory)
if err != nil {
return fmt.Errorf("invalid memory limit format: %w", err)
return fmt.Errorf("Invalid memory limit format: %+v", err)
}
// mount fake meminfo as readonly
fakemempath, err := util.MakeFakeMeminfo(memory, node.Name)
if err != nil {
return fmt.Errorf("failed to create fake meminfo: %w", err)
return fmt.Errorf("Failed to create fake meminfo: %+v", err)
}
node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s:ro", fakemempath, util.MemInfoPath))
// mount empty edac folder, but only if it exists
exists, err := docker.CheckIfDirectoryExists(ctx, node.Image, util.EdacFolderPath)
if err != nil {
return fmt.Errorf("failed to check for the existence of edac folder: %w", err)
return fmt.Errorf("Failed to check for the existence of edac folder: %+v", err)
}
if exists {
l.Log().Debugln("Found edac folder")
log.Debugln("Found edac folder")
fakeedacpath, err := util.MakeFakeEdac(node.Name)
if err != nil {
return fmt.Errorf("failed to create fake edac: %w", err)
return fmt.Errorf("Failed to create fake edac: %+v", err)
}
node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s:ro", fakeedacpath, util.EdacFolderPath))
}
@ -566,7 +438,7 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
* CREATION
*/
if err := runtime.CreateNode(ctx, node); err != nil {
return fmt.Errorf("runtime failed to create node '%s': %w", node.Name, err)
return err
}
return nil
@ -576,17 +448,17 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, opts k3d.NodeDeleteOpts) error {
// delete node
if err := runtime.DeleteNode(ctx, node); err != nil {
l.Log().Error(err)
log.Error(err)
}
// delete fake folder created for limits
if node.Memory != "" {
l.Log().Debug("Cleaning fake files folder from k3d config dir for this node...")
log.Debug("Cleaning fake files folder from k3d config dir for this node...")
filepath, err := util.GetNodeFakerDirOrCreate(node.Name)
err = os.RemoveAll(filepath)
if err != nil {
// this err prob should not be fatal, just log it
l.Log().Errorf("Could not remove fake files folder for node %s: %+v", node.Name, err)
log.Errorf("Could not remove fake files folder for node %s: %+v", node.Name, err)
}
}
@ -594,14 +466,15 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o
if !opts.SkipLBUpdate && (node.Role == k3d.ServerRole || node.Role == k3d.AgentRole) {
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.RuntimeLabels[k3d.LabelClusterName]})
if err != nil {
return fmt.Errorf("failed fo find cluster for node '%s': %w", node.Name, err)
log.Errorf("Failed to find cluster for node '%s'", node.Name)
return err
}
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
if !errors.Is(err, ErrLBConfigHostNotFound) {
return fmt.Errorf("failed to update cluster loadbalancer: %w", err)
return fmt.Errorf("Failed to update cluster loadbalancer: %w", err)
}
}
}
@ -637,7 +510,7 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error {
dockerHost := runtime.GetHost()
if dockerHost != "" {
dockerHost = strings.Split(dockerHost, ":")[0] // remove the port
l.Log().Tracef("Using docker host %s", dockerHost)
log.Tracef("Using docker host %s", dockerHost)
node.RuntimeLabels[k3d.LabelServerAPIHostIP] = dockerHost
node.RuntimeLabels[k3d.LabelServerAPIHost] = dockerHost
}
@ -652,7 +525,8 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error {
func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels)
if err != nil {
return nil, fmt.Errorf("failed to list nodes: %w", err)
log.Errorln("Failed to get nodes")
return nil, err
}
return nodes, nil
@ -663,7 +537,8 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
// get node
node, err := runtime.GetNode(ctx, node)
if err != nil {
return nil, fmt.Errorf("failed to get node '%s': %w", node.Name, err)
log.Errorf("Failed to get node '%s'", node.Name)
return nil, err
}
return node, nil
@ -671,101 +546,62 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
// NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
l.Log().Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
// specify max number of retries if container is in crashloop (as defined by last seen message being a fatal log)
backOffLimit := k3d.DefaultNodeWaitForLogMessageCrashLoopBackOffLimit
if l, ok := os.LookupEnv(k3d.K3dEnvDebugNodeWaitBackOffLimit); ok {
limit, err := strconv.Atoi(l)
if err == nil {
backOffLimit = limit
}
}
// start a goroutine to print a warning continuously if a node is restarting for quite some time already
donechan := make(chan struct{})
defer close(donechan)
go func(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, since time.Time, donechan chan struct{}) {
for {
select {
case <-ctx.Done():
return
case <-donechan:
return
default:
log.Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
for {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
d, ok := ctx.Deadline()
if ok {
log.Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
}
// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
l.Log().Warnf("Node '%s' is restarting for more than %s now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name, k3d.NodeWaitForLogMessageRestartWarnTime)
}
time.Sleep(500 * time.Millisecond)
return ctx.Err()
default:
}
}(ctx, runtime, node, since, donechan)
// Start loop to check log stream for specified log message.
// We're looping here, as sometimes the containers run into a crash loop, but *may* recover from that
// e.g. when a new server is joining an existing cluster and has to wait for another member to finish learning.
// The logstream returned by docker ends everytime the container restarts, so we have to start from the beginning.
for i := 0; i < backOffLimit; i++ {
// get the log stream (reader is following the logstream)
out, err := runtime.GetNodeLogs(ctx, node, since, &runtimeTypes.NodeLogsOpts{Follow: true})
if out != nil {
defer out.Close()
}
// read the logs
out, err := runtime.GetNodeLogs(ctx, node, since)
if err != nil {
if out != nil {
out.Close()
}
return fmt.Errorf("Failed waiting for log message '%s' from node '%s': %w", message, node.Name, err)
}
defer out.Close()
// We're scanning the logstream continuously line-by-line
scanner := bufio.NewScanner(out)
var previousline string
for scanner.Scan() {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
d, ok := ctx.Deadline()
if ok {
l.Log().Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
}
return ctx.Err()
default:
}
if strings.Contains(os.Getenv(k3d.K3dEnvLogNodeWaitLogs), string(node.Role)) {
l.Log().Tracef(">>> Parsing log line: `%s`", scanner.Text())
}
// check if we can find the specified line in the log
if strings.Contains(scanner.Text(), message) {
l.Log().Tracef("Found target message `%s` in log line `%s`", message, scanner.Text())
l.Log().Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
}
previousline = scanner.Text()
buf := new(bytes.Buffer)
nRead, _ := buf.ReadFrom(out)
out.Close()
output := buf.String()
if nRead > 0 && strings.Contains(os.Getenv("K3D_LOG_NODE_WAIT_LOGS"), string(node.Role)) {
log.Tracef("=== Read logs since %s ===\n%s\n", since, output)
}
out.Close() // no more input on scanner, but target log not yet found -> close current logreader (precautionary)
// we got here, because the logstream ended (no more input on scanner), so we check if maybe the container crashed
if strings.Contains(previousline, "level=fatal") {
// case 1: last log line we saw contained a fatal error, so probably it crashed and we want to retry on restart
l.Log().Warnf("warning: encountered fatal log from node %s (retrying %d/%d): %s", node.Name, i, backOffLimit, previousline)
out.Close()
time.Sleep(500 * time.Millisecond)
continue
} else {
// case 2: last log line we saw did not contain a fatal error, so we break the loop here and return a generic error
// check if we can find the specified line in the log
if nRead > 0 && strings.Contains(output, message) {
if log.GetLevel() >= log.TraceLevel {
temp := strings.Split(output, "\n")
for _, l := range temp {
if strings.Contains(l, message) {
log.Tracef("Found target log line: `%s`", l)
}
}
}
break
}
// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
log.Warnf("Node '%s' is restarting for more than a minute now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name)
}
time.Sleep(500 * time.Millisecond) // wait for half a second to avoid overloading docker (error `socket: too many open files`)
}
return fmt.Errorf("error waiting for log line `%s` from node '%s': stopped returning log lines", message, node.Name)
log.Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
}
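The retry loop above exists because Docker's log stream closes whenever the container restarts. A minimal standalone sketch of that pattern, assuming a hypothetical open callback that re-attaches to the logs (not the actual k3d runtime API):

package main

import (
    "bufio"
    "context"
    "fmt"
    "io"
    "strings"
    "time"
)

// watchForMessage follows a log source that ends whenever the container
// restarts, re-opening it up to backOffLimit times until message shows up.
func watchForMessage(ctx context.Context, open func(context.Context) (io.ReadCloser, error), message string, backOffLimit int) error {
    for i := 0; i < backOffLimit; i++ {
        out, err := open(ctx)
        if err != nil {
            return fmt.Errorf("failed to open log stream: %w", err)
        }
        scanner := bufio.NewScanner(out)
        var lastLine string
        for scanner.Scan() {
            if strings.Contains(scanner.Text(), message) {
                out.Close()
                return nil // target line found
            }
            lastLine = scanner.Text()
        }
        out.Close()
        // Stream ended without a match: retry only if the last line looks like a crash.
        if !strings.Contains(lastLine, "level=fatal") {
            break
        }
        time.Sleep(500 * time.Millisecond) // don't hammer the runtime between retries
    }
    return fmt.Errorf("log message %q never appeared", message)
}

func main() {
    open := func(context.Context) (io.ReadCloser, error) {
        return io.NopCloser(strings.NewReader("starting\nk3s is up and running\n")), nil
    }
    fmt.Println(watchForMessage(context.Background(), open, "k3s is up and running", 3))
}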
// NodeFilterByRoles filters a list of nodes by their roles
@ -774,7 +610,7 @@ func NodeFilterByRoles(nodes []*k3d.Node, includeRoles, excludeRoles []k3d.Role)
for _, includeRole := range includeRoles {
for _, excludeRole := range excludeRoles {
if includeRole == excludeRole {
l.Log().Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
log.Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
}
}
}
@ -798,7 +634,7 @@ nodeLoop:
}
}
l.Log().Tracef("Filteres %d nodes by roles (in: %+v | ex: %+v), got %d left", len(nodes), includeRoles, excludeRoles, len(resultList))
log.Tracef("Filteres %d nodes by roles (in: %+v | ex: %+v), got %d left", len(nodes), includeRoles, excludeRoles, len(resultList))
return resultList
}
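A condensed sketch of the include/exclude role filtering above, with exclusion taking precedence; plain string roles stand in for the k3d role types:

package nodefilter

// filterByRoles keeps the names of nodes whose role is in include and not in
// exclude; a role listed on both sides is excluded, matching the warning above.
func filterByRoles(nodes map[string]string, include, exclude []string) []string {
    in := make(map[string]bool, len(include))
    for _, r := range include {
        in[r] = true
    }
    ex := make(map[string]bool, len(exclude))
    for _, r := range exclude {
        ex[r] = true
    }
    var result []string
    for name, role := range nodes { // nodes maps node name -> role
        if in[role] && !ex[role] {
            result = append(result, name)
        }
    }
    return result
}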
@ -812,7 +648,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
result, err := CopyNode(ctx, existingNode, CopyNodeOpts{keepState: false})
if err != nil {
return fmt.Errorf("failed to copy node %s: %w", existingNode.Name, err)
return err
}
/*
@ -830,11 +666,11 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
// loop over existing portbindings to avoid port collisions (docker doesn't check for it)
for _, existingPB := range result.Ports[port] {
if util.IsPortBindingEqual(portbinding, existingPB) { // also matches on "equal" HostIPs (127.0.0.1, "", 0.0.0.0)
l.Log().Tracef("Skipping existing PortBinding: %+v", existingPB)
log.Tracef("Skipping existing PortBinding: %+v", existingPB)
continue loopChangesetPortbindings
}
}
l.Log().Tracef("Adding portbinding %+v for port %s", portbinding, port.Port())
log.Tracef("Adding portbinding %+v for port %s", portbinding, port.Port())
result.Ports[port] = append(result.Ports[port], portbinding)
}
}
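The collision check above only appends a port binding when no equivalent one already exists. A small sketch using the real nat.PortBinding type but a simplified equality rule (the normalization of host IPs is an assumption of this sketch, not the actual util.IsPortBindingEqual logic):

package portedit

import "github.com/docker/go-connections/nat"

// hostIPsEquivalent treats "", "0.0.0.0" and "127.0.0.1" as the same bind
// target, mirroring the "equal HostIPs" note above.
func hostIPsEquivalent(a, b string) bool {
    normalize := func(ip string) string {
        if ip == "" || ip == "0.0.0.0" || ip == "127.0.0.1" {
            return "*"
        }
        return ip
    }
    return normalize(a) == normalize(b)
}

// addPortBinding appends pb only if an equivalent binding is not already
// present, so docker is never asked to bind the same host port twice.
func addPortBinding(bindings []nat.PortBinding, pb nat.PortBinding) []nat.PortBinding {
    for _, existing := range bindings {
        if existing.HostPort == pb.HostPort && hostIPsEquivalent(existing.HostIP, pb.HostIP) {
            return bindings // skip the duplicate
        }
    }
    return append(bindings, pb)
}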
@ -857,7 +693,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
// prepare to write config to lb container
configyaml, err := yaml.Marshal(lbConfig)
if err != nil {
return fmt.Errorf("failed to marshal loadbalancer config: %w", err)
return err
}
writeLbConfigAction := k3d.NodeHook{
@ -882,14 +718,14 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
// rename existing node
oldNameTemp := fmt.Sprintf("%s-%s", old.Name, util.GenerateRandomString(5))
oldNameOriginal := old.Name
l.Log().Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp)
log.Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp)
if err := runtime.RenameNode(ctx, old, oldNameTemp); err != nil {
return fmt.Errorf("runtime failed to rename node '%s': %w", old.Name, err)
return err
}
old.Name = oldNameTemp
// create (not start) new node
l.Log().Infof("Creating new node %s...", new.Name)
log.Infof("Creating new node %s...", new.Name)
if err := NodeCreate(ctx, runtime, new, k3d.NodeCreateOpts{Wait: true}); err != nil {
if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil {
return fmt.Errorf("Failed to create new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
@ -898,14 +734,14 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
}
// stop existing/old node
l.Log().Infof("Stopping existing node %s...", old.Name)
log.Infof("Stopping existing node %s...", old.Name)
if err := runtime.StopNode(ctx, old); err != nil {
return fmt.Errorf("runtime failed to stop node '%s': %w", old.Name, err)
return err
}
// start new node
l.Log().Infof("Starting new node %s...", new.Name)
if err := NodeStart(ctx, runtime, new, &k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
log.Infof("Starting new node %s...", new.Name)
if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err)
}
@ -913,16 +749,16 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
return fmt.Errorf("Failed to start new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
}
old.Name = oldNameOriginal
if err := NodeStart(ctx, runtime, old, &k3d.NodeStartOpts{Wait: true}); err != nil {
if err := NodeStart(ctx, runtime, old, k3d.NodeStartOpts{Wait: true}); err != nil {
return fmt.Errorf("Failed to start new node. Also failed to restart old node: %+v", err)
}
return fmt.Errorf("Failed to start new node. Rolled back: %+v", err)
}
// cleanup: delete old node
l.Log().Infof("Deleting old node %s...", old.Name)
log.Infof("Deleting old node %s...", old.Name)
if err := NodeDelete(ctx, runtime, old, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
return fmt.Errorf("failed to delete old node '%s': %w", old.Name, err)
return err
}
// done
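The replace flow above boils down to: rename old, create new, stop old, start new, roll back on failure, delete old. Sketched against a minimal hypothetical Runtime interface rather than the actual k3d runtime:

package nodereplace

import (
    "context"
    "fmt"
)

// Runtime is a minimal stand-in for the container runtime used in this sketch.
type Runtime interface {
    Rename(ctx context.Context, oldName, newName string) error
    Create(ctx context.Context, name string) error
    Start(ctx context.Context, name string) error
    Stop(ctx context.Context, name string) error
    Delete(ctx context.Context, name string) error
}

// replaceNode swaps old for new, restoring the old node if the new one fails to start.
func replaceNode(ctx context.Context, rt Runtime, oldName, newName string) error {
    tmpName := oldName + "-old"
    if err := rt.Rename(ctx, oldName, tmpName); err != nil {
        return fmt.Errorf("runtime failed to rename node '%s': %w", oldName, err)
    }
    if err := rt.Create(ctx, newName); err != nil {
        _ = rt.Rename(ctx, tmpName, oldName) // roll back the rename
        return fmt.Errorf("failed to create new node: %w", err)
    }
    if err := rt.Stop(ctx, tmpName); err != nil {
        return fmt.Errorf("runtime failed to stop node '%s': %w", tmpName, err)
    }
    if err := rt.Start(ctx, newName); err != nil {
        _ = rt.Delete(ctx, newName)          // drop the broken replacement
        _ = rt.Rename(ctx, tmpName, oldName) // restore the original name
        _ = rt.Start(ctx, oldName)           // bring the old node back up
        return fmt.Errorf("failed to start new node, rolled back: %w", err)
    }
    // cleanup: the replacement is up, so the old node can go
    return rt.Delete(ctx, tmpName)
}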
@ -937,7 +773,7 @@ func CopyNode(ctx context.Context, src *k3d.Node, opts CopyNodeOpts) (*k3d.Node,
targetCopy, err := copystruct.Copy(src)
if err != nil {
return nil, fmt.Errorf("failed to copy node struct: %w", err)
return nil, err
}
result := targetCopy.(*k3d.Node)
@ -947,5 +783,5 @@ func CopyNode(ctx context.Context, src *k3d.Node, opts CopyNodeOpts) (*k3d.Node,
result.State = k3d.NodeState{}
}
return result, nil
return result, err
}

View File

@ -28,13 +28,12 @@ import (
"strings"
"github.com/docker/go-connections/nat"
"github.com/rancher/k3d/v5/pkg/config/types"
config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/util"
"github.com/sirupsen/logrus"
"github.com/rancher/k3d/v4/pkg/config/types"
config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
@ -47,15 +46,15 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
nodeList := cluster.Nodes
for _, portWithNodeFilters := range portsWithNodeFilters {
l.Log().Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
l.Log().Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
}
for _, f := range portWithNodeFilters.NodeFilters {
if strings.HasPrefix(f, "loadbalancer") {
l.Log().Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
break
}
@ -81,7 +80,7 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
}
for _, pm := range portmappings {
if err := loadbalancerAddPortConfigs(cluster.ServerLoadBalancer, pm, nodes); err != nil {
return fmt.Errorf("error adding port config to loadbalancer: %w", err)
return err
}
}
} else if suffix == "direct" {
@ -100,14 +99,13 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
}
// print generated loadbalancer config if exists
// (avoid segmentation fault if loadbalancer is disabled)
if l.Log().GetLevel() >= logrus.DebugLevel && cluster.ServerLoadBalancer != nil {
// print generated loadbalancer config
if log.GetLevel() >= log.DebugLevel {
yamlized, err := yaml.Marshal(cluster.ServerLoadBalancer.Config)
if err != nil {
l.Log().Errorf("error printing loadbalancer config: %v", err)
log.Errorf("error printing loadbalancer config: %v", err)
} else {
l.Log().Debugf("generated loadbalancer config:\n%s", string(yamlized))
log.Debugf("generated loadbalancer config:\n%s", string(yamlized))
}
}
return nil
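The added nil check above prevents a panic when the loadbalancer is disabled. The guarded debug dump, sketched with logrus and a placeholder config type:

package lbdebug

import (
    "github.com/sirupsen/logrus"
    "gopkg.in/yaml.v2"
)

// LoadBalancer is a placeholder for the cluster's loadbalancer and its config.
type LoadBalancer struct {
    Config map[string]interface{}
}

// dumpConfig prints the generated config at debug level, but only if the
// loadbalancer exists at all (it may be disabled) and debug logging is on.
func dumpConfig(log *logrus.Logger, lb *LoadBalancer) {
    if lb == nil || log.GetLevel() < logrus.DebugLevel {
        return
    }
    yamlized, err := yaml.Marshal(lb.Config)
    if err != nil {
        log.Errorf("error printing loadbalancer config: %v", err)
        return
    }
    log.Debugf("generated loadbalancer config:\n%s", string(yamlized))
}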

View File

@ -28,12 +28,12 @@ import (
"github.com/docker/go-connections/nat"
"github.com/imdario/mergo"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/pkg/types/k8s"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/k3s"
"github.com/rancher/k3d/v4/pkg/types/k8s"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
@ -43,11 +43,11 @@ func RegistryRun(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Registr
return nil, fmt.Errorf("Failed to create registry: %+v", err)
}
if err := NodeStart(ctx, runtime, regNode, &k3d.NodeStartOpts{}); err != nil {
if err := NodeStart(ctx, runtime, regNode, k3d.NodeStartOpts{}); err != nil {
return nil, fmt.Errorf("Failed to start registry: %+v", err)
}
return regNode, nil
return regNode, err
}
// RegistryCreate creates a registry node
@ -58,8 +58,8 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi
reg.Host = k3d.DefaultRegistryName
}
// if err := ValidateHostname(reg.Host); err != nil {
// l.Log().Errorln("Invalid name for registry")
// l.Log().Fatalln(err)
// log.Errorln("Invalid name for registry")
// log.Fatalln(err)
// }
registryNode := &k3d.Node{
@ -97,12 +97,13 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi
registryNode.Ports[reg.ExposureOpts.Port] = []nat.PortBinding{reg.ExposureOpts.Binding}
// create the registry node
l.Log().Infof("Creating node '%s'", registryNode.Name)
log.Infof("Creating node '%s'", registryNode.Name)
if err := NodeCreate(ctx, runtime, registryNode, k3d.NodeCreateOpts{}); err != nil {
return nil, fmt.Errorf("failed to create registry node '%s': %w", registryNode.Name, err)
log.Errorln("Failed to create registry node")
return nil, err
}
l.Log().Infof("Successfully created registry '%s'", registryNode.Name)
log.Infof("Successfully created registry '%s'", registryNode.Name)
return registryNode, nil
@ -114,7 +115,8 @@ func RegistryConnectClusters(ctx context.Context, runtime runtimes.Runtime, regi
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
return fmt.Errorf("Failed to find registry node '%s': %w", registryNode.Name, err)
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
}
// get cluster details and connect
@ -122,13 +124,13 @@ func RegistryConnectClusters(ctx context.Context, runtime runtimes.Runtime, regi
for _, c := range clusters {
cluster, err := ClusterGet(ctx, runtime, c)
if err != nil {
l.Log().Warnf("Failed to connect to cluster '%s': Cluster not found", c.Name)
log.Warnf("Failed to connect to cluster '%s': Cluster not found", c.Name)
failed++
continue
}
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, cluster.Network.Name); err != nil {
l.Log().Warnf("Failed to connect to cluster '%s': Connection failed", cluster.Name)
l.Log().Warnln(err)
log.Warnf("Failed to connect to cluster '%s': Connection failed", cluster.Name)
log.Warnln(err)
failed++
}
}
@ -146,15 +148,16 @@ func RegistryConnectNetworks(ctx context.Context, runtime runtimes.Runtime, regi
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
return fmt.Errorf("Failed to find registry node '%s': %w", registryNode.Name, err)
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
}
// get cluster details and connect
failed := 0
for _, net := range networks {
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, net); err != nil {
l.Log().Warnf("Failed to connect to network '%s': Connection failed", net)
l.Log().Warnln(err)
log.Warnf("Failed to connect to network '%s': Connection failed", net)
log.Warnln(err)
failed++
}
}
@ -244,7 +247,7 @@ func RegistryFromNode(node *k3d.Node) (*k3d.Registry, error) {
}
}
l.Log().Tracef("Got registry %+v from node %+v", registry, node)
log.Tracef("Got registry %+v from node %+v", registry, node)
return registry, nil
@ -270,11 +273,11 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
}
if len(registries) > 1 {
l.Log().Warnf("More than one registry specified, but the LocalRegistryHostingV1 spec only supports one -> Selecting the first one: %s", registries[0].Host)
log.Warnf("More than one registry specified, but the LocalRegistryHostingV1 spec only supports one -> Selecting the first one: %s", registries[0].Host)
}
if len(registries) < 1 {
l.Log().Debugln("No registry specified, not generating local registry hosting configmap")
log.Debugln("No registry specified, not generating local registry hosting configmap")
return nil, nil
}
@ -287,15 +290,15 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
// if the host is now 0.0.0.0, check if we can set it to the IP of the docker-machine, if it's used
if host == k3d.DefaultAPIHost && runtime == runtimes.Docker {
if gort.GOOS == "windows" || gort.GOOS == "darwin" {
l.Log().Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
if err != nil {
l.Log().Warnf("Using docker-machine, but failed to get it's IP for usage in LocalRegistryHosting Config Map: %+v", err)
log.Warnf("Using docker-machine, but failed to get it's IP for usage in LocalRegistryHosting Config Map: %+v", err)
} else if machineIP != "" {
l.Log().Infof("Using the docker-machine IP %s in the LocalRegistryHosting Config Map", machineIP)
log.Infof("Using the docker-machine IP %s in the LocalRegistryHosting Config Map", machineIP)
host = machineIP
} else {
l.Log().Traceln("Not using docker-machine")
log.Traceln("Not using docker-machine")
}
}
}
@ -310,12 +313,11 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
k8s.LocalRegistryHostingV1{
Host: fmt.Sprintf("%s:%s", host, registries[0].ExposureOpts.Binding.HostPort),
HostFromContainerRuntime: fmt.Sprintf("%s:%s", registries[0].Host, registries[0].ExposureOpts.Port.Port()),
HostFromClusterNetwork: fmt.Sprintf("%s:%s", registries[0].Host, registries[0].ExposureOpts.Port.Port()),
Help: "https://k3d.io/usage/guides/registries/#using-a-local-registry",
},
)
if err != nil {
return nil, fmt.Errorf("failed to marshal LocalRegistryHosting configmap data: %w", err)
return nil, err
}
cm := configmap{
@ -332,10 +334,10 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
cmYaml, err := yaml.Marshal(cm)
if err != nil {
return nil, fmt.Errorf("failed to marshal LocalRegistryHosting configmap: %w", err)
return nil, err
}
l.Log().Tracef("LocalRegistryHostingConfigMapYaml: %s", string(cmYaml))
log.Tracef("LocalRegistryHostingConfigMapYaml: %s", string(cmYaml))
return cmYaml, nil
}
@ -343,7 +345,7 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
// RegistryMergeConfig merges a source registry config into an existing dest registry config
func RegistryMergeConfig(ctx context.Context, dest, src *k3s.Registry) error {
if err := mergo.MergeWithOverwrite(dest, src); err != nil {
return fmt.Errorf("failed to merge registry configs: %w", err)
return fmt.Errorf("Failed to merge registry configs: %+v", err)
}
return nil
}
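RegistryMergeConfig leans on mergo's overwrite merge; a toy example of that call with a stand-in registry struct (not the k3s registry type):

package main

import (
    "fmt"

    "github.com/imdario/mergo"
)

type mirror struct {
    Endpoints []string
}

type registryConfig struct {
    Mirrors map[string]mirror
}

func main() {
    dest := registryConfig{Mirrors: map[string]mirror{"docker.io": {Endpoints: []string{"https://registry-1.docker.io"}}}}
    src := registryConfig{Mirrors: map[string]mirror{"registry.localhost": {Endpoints: []string{"http://registry.localhost:5000"}}}}

    // MergeWithOverwrite copies src into dest, overwriting values that are already set.
    if err := mergo.MergeWithOverwrite(&dest, src); err != nil {
        fmt.Println("failed to merge registry configs:", err)
        return
    }
    fmt.Printf("%+v\n", dest)
}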

View File

@ -27,8 +27,8 @@ import (
"testing"
"github.com/docker/go-connections/nat"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
func TestRegistryGenerateLocalRegistryHostingConfigMapYAML(t *testing.T) {
@ -42,7 +42,6 @@ metadata:
data:
localRegistryHosting.v1: |
host: test-host:5432
hostFromClusterNetwork: test-host:1234
hostFromContainerRuntime: test-host:1234
help: https://k3d.io/usage/guides/registries/#using-a-local-registry
`

View File

@ -25,15 +25,15 @@ import (
"fmt"
"strings"
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"github.com/rancher/k3d/v5/pkg/config/v1alpha2"
"github.com/rancher/k3d/v5/pkg/config/v1alpha3"
defaultConfig "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
"github.com/rancher/k3d/v4/pkg/config/v1alpha2"
"github.com/rancher/k3d/v4/pkg/config/v1alpha3"
defaultConfig "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
types "github.com/rancher/k3d/v5/pkg/config/types"
types "github.com/rancher/k3d/v4/pkg/config/types"
)
const DefaultConfigApiVersion = defaultConfig.ApiVersion
@ -59,7 +59,7 @@ func FromViper(config *viper.Viper) (types.Config, error) {
apiVersion := strings.ToLower(config.GetString("apiversion"))
kind := strings.ToLower(config.GetString("kind"))
l.Log().Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind)
log.Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind)
switch apiVersion {
case "k3d.io/v1alpha2":
@ -73,11 +73,13 @@ func FromViper(config *viper.Viper) (types.Config, error) {
}
if err != nil {
return nil, fmt.Errorf("failed to parse config '%s': %w'", config.ConfigFileUsed(), err)
return nil, err
}
if err := config.Unmarshal(&cfg); err != nil {
return nil, fmt.Errorf("failed to unmarshal config file '%s': %w", config.ConfigFileUsed(), err)
log.Errorln("Failed to unmarshal File config")
return nil, err
}
return cfg, nil
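A reduced sketch of the FromViper flow: read the file with viper, dispatch on apiVersion/kind, then unmarshal into a typed config. The struct and the single supported version here are simplifications, not the real k3d schema:

package main

import (
    "fmt"
    "strings"

    "github.com/spf13/viper"
)

// simpleConfig is a stand-in for the real SimpleConfig type.
type simpleConfig struct {
    APIVersion string
    Kind       string
    Name       string
    Servers    int
    Agents     int
}

func fromViper(v *viper.Viper) (*simpleConfig, error) {
    apiVersion := strings.ToLower(v.GetString("apiversion"))
    kind := strings.ToLower(v.GetString("kind"))

    // Only one schema version is handled in this sketch; the real code
    // switches between v1alpha2 and v1alpha3 and migrates as needed.
    if apiVersion != "k3d.io/v1alpha3" || kind != "simple" {
        return nil, fmt.Errorf("unsupported config apiVersion=%q kind=%q", apiVersion, kind)
    }

    var cfg simpleConfig
    if err := v.Unmarshal(&cfg); err != nil {
        return nil, fmt.Errorf("failed to unmarshal config file '%s': %w", v.ConfigFileUsed(), err)
    }
    return &cfg, nil
}

func main() {
    v := viper.New()
    v.SetConfigFile("k3d-config.yaml") // hypothetical path
    if err := v.ReadInConfig(); err != nil {
        fmt.Println("failed to read config:", err)
        return
    }
    cfg, err := fromViper(v)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("%+v\n", *cfg)
}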

View File

@ -26,11 +26,11 @@ import (
"time"
"github.com/go-test/deep"
configtypes "github.com/rancher/k3d/v5/pkg/config/types"
conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
configtypes "github.com/rancher/k3d/v4/pkg/config/types"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
"github.com/spf13/viper"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
func TestReadSimpleConfig(t *testing.T) {
@ -256,52 +256,3 @@ func TestReadUnknownConfig(t *testing.T) {
}
}
func TestReadSimpleConfigRegistries(t *testing.T) {
exposedAPI := conf.SimpleExposureOpts{}
exposedAPI.HostIP = "0.0.0.0"
exposedAPI.HostPort = "6443"
expectedConfig := conf.SimpleConfig{
TypeMeta: configtypes.TypeMeta{
APIVersion: "k3d.io/v1alpha3",
Kind: "Simple",
},
Name: "test",
Servers: 1,
Agents: 1,
Registries: conf.SimpleConfigRegistries{
Create: &conf.SimpleConfigRegistryCreateConfig{
Name: "registry.localhost",
Host: "0.0.0.0",
HostPort: "5001",
},
},
}
cfgFile := "./test_assets/config_test_registries.yaml"
config := viper.New()
config.SetConfigFile(cfgFile)
// try to read config into memory (viper map structure)
if err := config.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
t.Error(err)
}
// config file found but some other error happened
t.Error(err)
}
readConfig, err := FromViper(config)
if err != nil {
t.Error(err)
}
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", readConfig)
if diff := deep.Equal(readConfig, expectedConfig); diff != nil {
t.Errorf("Actual representation\n%+v\ndoes not match expected representation\n%+v\nDiff:\n%+v", readConfig, expectedConfig, diff)
}
}

View File

@ -25,21 +25,21 @@ import (
"bytes"
"errors"
"fmt"
"os"
"io/ioutil"
"strings"
"sigs.k8s.io/yaml"
"github.com/xeipuuv/gojsonschema"
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
)
// ValidateSchemaFile takes a filepath, reads the file and validates it against a JSON schema
func ValidateSchemaFile(filepath string, schema []byte) error {
l.Log().Debugf("Validating file %s against default JSONSchema...", filepath)
log.Debugf("Validating file %s against default JSONSchema...", filepath)
fileContents, err := os.ReadFile(filepath)
fileContents, err := ioutil.ReadFile(filepath)
if err != nil {
return fmt.Errorf("Failed to read file %s: %+v", filepath, err)
}
@ -53,7 +53,7 @@ func ValidateSchemaFile(filepath string, schema []byte) error {
}
// ValidateSchema validates a YAML construct (non-struct representation) against a JSON Schema
func ValidateSchema(content interface{}, schemaJSON []byte) error {
func ValidateSchema(content map[string]interface{}, schemaJSON []byte) error {
contentYaml, err := yaml.Marshal(content)
if err != nil {
@ -64,11 +64,6 @@ func ValidateSchema(content interface{}, schemaJSON []byte) error {
return err
}
return ValidateSchemaJSON(contentJSON, schemaJSON)
}
func ValidateSchemaJSON(contentJSON []byte, schemaJSON []byte) error {
if bytes.Equal(contentJSON, []byte("null")) {
contentJSON = []byte("{}") // non-json yaml struct
}
@ -78,10 +73,10 @@ func ValidateSchemaJSON(contentJSON []byte, schemaJSON []byte) error {
result, err := gojsonschema.Validate(schemaLoader, configLoader)
if err != nil {
return fmt.Errorf("failed to validate config: %w", err)
return err
}
l.Log().Debugf("JSON Schema Validation Result: %+v", result)
log.Debugf("JSON Schema Validation Result: %+v", result)
if !result.Valid() {
var sb strings.Builder
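The validation path above amounts to a YAML-to-JSON conversion followed by gojsonschema validation against a byte-loaded schema; a compact sketch under that assumption, with an inline toy schema:

package main

import (
    "bytes"
    "fmt"

    "github.com/xeipuuv/gojsonschema"
    "sigs.k8s.io/yaml"
)

var schemaJSON = []byte(`{"type":"object","required":["kind"],"properties":{"kind":{"type":"string"}}}`)

// validateYAML converts YAML content to JSON and validates it against schemaJSON.
func validateYAML(content []byte) error {
    contentJSON, err := yaml.YAMLToJSON(content)
    if err != nil {
        return fmt.Errorf("failed to convert YAML to JSON: %w", err)
    }
    if bytes.Equal(contentJSON, []byte("null")) {
        contentJSON = []byte("{}") // empty YAML document
    }
    result, err := gojsonschema.Validate(
        gojsonschema.NewBytesLoader(schemaJSON),
        gojsonschema.NewBytesLoader(contentJSON),
    )
    if err != nil {
        return fmt.Errorf("failed to validate config: %w", err)
    }
    if !result.Valid() {
        for _, e := range result.Errors() {
            fmt.Println("-", e.String())
        }
        return fmt.Errorf("schema validation failed")
    }
    return nil
}

func main() {
    fmt.Println(validateYAML([]byte("kind: Simple\n")))
    fmt.Println(validateYAML([]byte("name: 42\n")))
}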

Some files were not shown because too many files have changed in this diff