Compare commits: enhancemen...main (72 commits)
Commits (SHA1):
89d979287e, 09686f121d, 1bcf802012, 3a467b42c1, 5e5a35c67c, f8f17caf78,
407ced6405, 7113694ab5, 376f0378af, a4d573fc2c, 6abb1fb20c, 594e3ced3f,
12180ffdd5, de2cda4396, 33507fd19a, 1e9c20e0a9, 7d4d63f18b, a897201914,
2040a458b1, de5805640b, 0b4c4d51aa, 8bdba73669, 5f2ea9aac0, 53bdbec636,
81a41bdab1, d21882a01c, 2a2bee0e63, 67d8c8c84f, f801e46e9e, acf9e65ef9,
a3d27e9c86, 3b9d8373e1, bfe1efb1e6, 7071129df8, 149dfdb9ab, fdcdc4117b,
2a62eab3a2, 7073a8fad7, 5aa1edfb73, 630788f1e7, 7ba71ad66c, 91426eabd1,
78738058c8, 6f76f8ce5d, 17dc4b7b4d, 6d45a15e05, 9efe980789, b4158a1dc1,
7c635c29ab, bcc1d60db9, 790b9e492f, 1ae8302980, e448f488be, 212979d0bb,
737ae9570c, 6a58a9f57d, 6770225a45, af82c130c6, 093b4e550f, aa6e902743,
2faeda2117, 8a49181798, 9abcbedb37, 95e0045418, 8647a0ca40, e0f5e2ba2c,
8a745062ed, 0c02607d1e, 917c19eae5, ac527e7c8a, b8f9bad879, 5d0d0acc77
@@ -121,6 +121,24 @@
       "contributions": [
         "doc"
       ]
-    }
+    },
+    {
+      "login": "Shanduur",
+      "name": "Mateusz Urbanek",
+      "avatar_url": "https://avatars.githubusercontent.com/u/32583062?v=4",
+      "profile": "http://shanduur.github.io",
+      "contributions": [
+        "code"
+      ]
+    },
+    {
+      "login": "benjaminjb",
+      "name": "Benjamin Blattberg",
+      "avatar_url": "https://avatars.githubusercontent.com/u/4651855?v=4",
+      "profile": "https://github.com/benjaminjb",
+      "contributions": [
+        "code"
+      ]
+    }
   ],
   "contributorsPerLine": 7,
.drone.yml (476 changed lines)
@@ -14,7 +14,7 @@ platform:
 steps:
 
   - name: lint
-    image: golang:1.16
+    image: golang:1.17
     commands:
       - make ci-setup
      - make check-fmt lint
@@ -40,7 +40,7 @@ steps:
       - tag
 
   - name: build
-    image: golang:1.16
+    image: golang:1.17
    environment:
      GIT_TAG: "${DRONE_TAG}"
    commands:
@@ -73,6 +73,7 @@ steps:
       - tag
     ref:
       include:
+        # include only pre-release tags
        - "refs/tags/*rc*"
        - "refs/tags/*beta*"
        - "refs/tags/*alpha*"
@@ -97,64 +98,13 @@ steps:
       - tag
     ref:
       exclude:
+        # exclude pre-release tags
        - "refs/tags/*rc*"
        - "refs/tags/*beta*"
        - "refs/tags/*alpha*"
        - "refs/tags/*test*"
        - "refs/tags/*dev*"
 
-  - name: docker_build_push_dind
-    image: plugins/docker
-    environment:
-      DOCKER_BUILDKIT: "1"
-    settings:
-      repo: rancher/k3d
-      tags:
-        - latest-dind
-        - "${DRONE_TAG}-dind"
-      dockerfile: Dockerfile
-      target: dind
-      context: .
-      username:
-        from_secret: docker_username
-      password:
-        from_secret: docker_password
-      build_args:
-        - GIT_TAG_OVERRIDE=${DRONE_TAG}
-    depends_on:
-      - lint
-      - test
-      - build
-    when:
-      event:
-        - tag
-
-  - name: docker_build_push_binary
-    environment:
-      DOCKER_BUILDKIT: "1"
-    image: plugins/docker
-    settings:
-      repo: rancher/k3d
-      tags:
-        - latest
-        - "${DRONE_TAG}"
-      dockerfile: Dockerfile
-      target: binary-only
-      context: .
-      username:
-        from_secret: docker_username
-      password:
-        from_secret: docker_password
-      build_args:
-        - GIT_TAG_OVERRIDE=${DRONE_TAG}
-    depends_on:
-      - lint
-      - test
-      - build
-    when:
-      event:
-        - tag
 
 services:
   # Starting the docker service to be used by dind
   - name: docker
@@ -168,14 +118,29 @@ volumes:
   - name: dockersock
     temp: {}
 
 ---
-#########################
-##### Documentation #####
-#########################
+###########################
+###### Docker Images ######
+###########################
+#
+# +++ Docker Images +++
+# Tagged using the auto_tag feature of the docker plugin
+# See http://plugins.drone.io/drone-plugins/drone-docker/#autotag
+# > if event type is `tag`
+# > > 1.0.0 produces docker tags 1, 1.0, 1.0.0
+# > > 1.0.0-rc.1 produces docker tags 1.0.0-rc.1
+# > if event type is `push` and target branch == default branch (main)
+# > > tag `latest`
+
+################################
+##### Docker Images: amd64 #####
+################################
 
 kind: pipeline
 type: docker
-name: docs
+name: linux_amd64
 
 platform:
   os: linux
@@ -183,93 +148,99 @@ platform:
 
 steps:
 
-  - name: build
-    image: python:3.9
-    commands:
-      - python3 -m pip install -r docs/requirements.txt
-      - mkdocs build --verbose --clean --strict
-    when:
-      branch:
-        - main
-      event:
-        - push
-
-  - name: publish
-    image: plugins/gh-pages
+  - name: build_push_binary
+    environment:
+      DOCKER_BUILDKIT: "1"
+    image: plugins/docker
     settings:
+      repo: rancher/k3d
+      auto_tag: true
+      auto_tag_suffix: linux-amd64
+      dockerfile: Dockerfile
+      target: binary-only
+      context: .
       username:
         from_secret: docker_username
       password:
-        from_secret: github_token
-      username: rancherio-gh-m
-      pages_directory: site/
-      target_branch: gh-pages
-    when:
-      branch:
-        - main
-      event:
-        - push
+        from_secret: docker_password
+      build_args:
+        - GIT_TAG_OVERRIDE=${DRONE_TAG}
 
-trigger:
-  event:
-    - push
-  branch:
-    - main
+  - name: build_push_dind
+    image: plugins/docker
+    environment:
+      DOCKER_BUILDKIT: "1"
+    settings:
+      repo: rancher/k3d
+      auto_tag: true
+      auto_tag_suffix: dind-linux-amd64
+      dockerfile: Dockerfile
+      target: dind
+      context: .
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
+      build_args:
+        - GIT_TAG_OVERRIDE=${DRONE_TAG}
+        - ARCH=amd64
 
----
-#####################
-##### k3d-proxy #####
-#####################
-
-kind: pipeline
-type: docker
-name: proxy_linux_amd64
-
-platform:
-  os: linux
-  arch: amd64
-
-steps:
-  - name: build_push
+  - name: build_push_proxy
     image: plugins/docker
     settings:
       repo: rancher/k3d-proxy
-      tags:
-        - latest-linux-amd64
-        - "${DRONE_TAG}-linux-amd64"
+      auto_tag: true
+      auto_tag_suffix: linux-amd64
       dockerfile: proxy/Dockerfile
       context: proxy/
       username:
         from_secret: docker_username
       password:
         from_secret: docker_password
-    when:
-      event:
-        - tag
 
+  - name: build_push_tools
+    image: plugins/docker
+    settings:
+      repo: rancher/k3d-tools
+      auto_tag: true
+      auto_tag_suffix: linux-amd64
+      dockerfile: tools/Dockerfile
+      context: tools/
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
 
 trigger:
   event:
-    - tag
+    - tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
+    - push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
 
+depends_on:
+  - main
 
 ---
 
+################################
+##### Docker Images: arm #####
+################################
+
 kind: pipeline
 type: docker
-name: proxy_linux_arm
+name: linux_arm
 
 platform:
   os: linux
   arch: arm
 
 steps:
-  - name: build_push
+
+  - name: build_push_proxy
     image: plugins/docker
     settings:
       repo: rancher/k3d-proxy
-      tags:
-        - latest-linux-arm
-        - "${DRONE_TAG}-linux-arm"
+      auto_tag: true
+      auto_tag_suffix: linux-arm
       dockerfile: proxy/Dockerfile
       context: proxy/
       username:
@@ -278,35 +249,87 @@ steps:
         from_secret: docker_password
       build_args:
         - ARCH=arm
-    when:
-      event:
-        - tag
 
+  - name: build_push_tools
+    image: plugins/docker
+    settings:
+      repo: rancher/k3d-tools
+      auto_tag: true
+      auto_tag_suffix: linux-arm
+      dockerfile: tools/Dockerfile
+      context: tools/
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
 
 trigger:
   event:
-    - tag
+    - tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
+    - push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
 
+depends_on:
+  - main
 
 ---
 
+################################
+##### Docker Images: arm64 #####
+################################
+
 kind: pipeline
 type: docker
-name: proxy_linux_arm64
+name: linux_arm64
 
 platform:
   os: linux
   arch: arm64
 
 steps:
-  - name: build_push
+
+  - name: build_push_binary
+    environment:
+      DOCKER_BUILDKIT: "1"
     image: plugins/docker
     settings:
-      repo: rancher/k3d-proxy
-      tags:
-        - latest-linux-arm64
-        - "${DRONE_TAG}-linux-arm64"
+      repo: rancher/k3d
+      auto_tag: true
+      auto_tag_suffix: linux-arm64
+      dockerfile: Dockerfile
+      target: binary-only
+      context: .
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
+      build_args:
+        - GIT_TAG_OVERRIDE=${DRONE_TAG}
+
+  - name: build_push_dind
+    image: plugins/docker
+    environment:
+      DOCKER_BUILDKIT: "1"
+    settings:
+      repo: rancher/k3d
+      auto_tag: true
+      auto_tag_suffix: dind-linux-arm64
+      dockerfile: Dockerfile
+      target: dind
+      context: .
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
+      build_args:
+        - GIT_TAG_OVERRIDE=${DRONE_TAG}
+        - ARCH=arm64
+
+  - name: build_push_proxy
+    image: plugins/docker
+    settings:
+      repo: rancher/k3d-proxy
+      auto_tag: true
+      auto_tag_suffix: linux-arm64
       dockerfile: proxy/Dockerfile
       context: proxy/
       username:
@@ -315,195 +338,94 @@ steps:
         from_secret: docker_password
       build_args:
         - ARCH=arm64
-    when:
-      event:
-        - tag
 
+  - name: build_push_tools
+    image: plugins/docker
+    settings:
+      repo: rancher/k3d-tools
+      auto_tag: true
+      auto_tag_suffix: linux-arm64
+      dockerfile: tools/Dockerfile
+      context: tools/
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
 
 trigger:
   event:
-    - tag
+    - tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
+    - push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
 
 depends_on:
   - main
 
 ---
 
+##############################
+###### Docker Manifests ######
+##############################
+
 kind: pipeline
 type: docker
-name: proxy_manifest
+name: manifests
 
 platform:
   os: linux
   arch: amd64
 
 steps:
-  - name: push_manifest
+
+  - name: push_manifest_binary
     image: plugins/manifest
     settings:
       username:
         from_secret: docker_username
       password:
         from_secret: docker_password
-      target: "rancher/k3d-proxy:${DRONE_TAG}"
-      template: "rancher/k3d-proxy:${DRONE_TAG}-OS-ARCH"
-      platforms:
-        - linux/amd64
-        - linux/arm
-        - linux/arm64
-    when:
-      event:
-        - tag
+      spec: manifest.tmpl
+      auto_tag: true
+      ignore_missing: true # expected, as we dropped arm due to missing base image for that arch
 
-trigger:
-  event:
-    - tag
-
-depends_on:
-  - main
-  - proxy_linux_amd64
-  - proxy_linux_arm
-  - proxy_linux_arm64
-
----
-#####################
-##### k3d-tools #####
-#####################
-
-kind: pipeline
-type: docker
-name: tools_linux_amd64
-
-platform:
-  os: linux
-  arch: amd64
-
-steps:
-  - name: build_push
-    image: plugins/docker
-    settings:
-      repo: rancher/k3d-tools
-      tags:
-        - latest-linux-amd64
-        - "${DRONE_TAG}-linux-amd64"
-      dockerfile: tools/Dockerfile
-      context: tools/
-      username:
-        from_secret: docker_username
-      password:
-        from_secret: docker_password
-    when:
-      event:
-        - tag
-
-trigger:
-  event:
-    - tag
-
-depends_on:
-  - main
-
----
-
-kind: pipeline
-type: docker
-name: tools_linux_arm
-
-platform:
-  os: linux
-  arch: arm
-
-steps:
-  - name: build_push
-    image: plugins/docker
-    settings:
-      repo: rancher/k3d-tools
-      tags:
-        - latest-linux-arm
-        - "${DRONE_TAG}-linux-arm"
-      dockerfile: tools/Dockerfile
-      context: tools/
-      username:
-        from_secret: docker_username
-      password:
-        from_secret: docker_password
-    when:
-      event:
-        - tag
-
-trigger:
-  event:
-    - tag
-
-depends_on:
-  - main
----
-
-kind: pipeline
-type: docker
-name: tools_linux_arm64
-
-platform:
-  os: linux
-  arch: arm64
-
-steps:
-  - name: build_push
-    image: plugins/docker
-    settings:
-      repo: rancher/k3d-tools
-      tags:
-        - latest-linux-arm64
-        - "${DRONE_TAG}-linux-arm64"
-      dockerfile: tools/Dockerfile
-      context: tools/
-      username:
-        from_secret: docker_username
-      password:
-        from_secret: docker_password
-    when:
-      event:
-        - tag
-
-trigger:
-  event:
-    - tag
-
-depends_on:
-  - main
----
-
-kind: pipeline
-type: docker
-name: tools_manifest
-
-platform:
-  os: linux
-  arch: amd64
-
-steps:
-  - name: push_manifest
+  - name: push_manifest_dind
     image: plugins/manifest
     settings:
       username:
         from_secret: docker_username
       password:
         from_secret: docker_password
-      target: "rancher/k3d-tools:${DRONE_TAG}"
-      template: "rancher/k3d-tools:${DRONE_TAG}-OS-ARCH"
-      platforms:
-        - linux/amd64
-        - linux/arm
-        - linux/arm64
-    when:
-      event:
-        - tag
+      spec: dind-manifest.tmpl
+      auto_tag: true
+      ignore_missing: true # expected, as we dropped arm due to missing base image for that arch
+
+  - name: push_manifest_proxy
+    image: plugins/manifest
+    settings:
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
+      spec: proxy/manifest.tmpl
+      auto_tag: true
+      ignore_missing: false
+
+  - name: push_manifest_tools
+    image: plugins/manifest
+    settings:
+      username:
+        from_secret: docker_username
+      password:
+        from_secret: docker_password
+      spec: tools/manifest.tmpl
+      auto_tag: true
+      ignore_missing: false
 
 trigger:
   event:
-    - tag
+    - tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the manifest plugin
+    - push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
 
 depends_on:
   - main
-  - tools_linux_amd64
-  - tools_linux_arm
-  - tools_linux_arm64
+  - linux_amd64
+  - linux_arm
+  - linux_arm64
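Reviewer note: the `auto_tag` scheme described in the comment block above drops the `v` prefix and fans a release tag out into hierarchical SemVer tags. A quick sketch of what that implies for consumers (hypothetical pulls, assuming a `1.0.0` release was published):

    # after a 1.0.0 tag build, auto_tag produces the tags 1, 1.0 and 1.0.0
    docker pull rancher/k3d:1.0.0
    docker pull rancher/k3d:1.0
    docker pull rancher/k3d:1        # tracks the latest 1.x.x release
    # after a push to the default branch (main), auto_tag only produces `latest`
    docker pull rancher/k3d:latest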
.github/workflows/docs.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
+name: k3d.io
+
+on:
+  push:
+    branches:
+      - main
+    tags:
+      # only run on tags for real releases and special docs releases
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - 'v[0-9]+.[0-9]+.[0-9]+-docs.[0-9]+'
+    # tags-ignore:
+    #   - "*rc*"
+    #   - "*beta*"
+    #   - "*alpha*"
+    #   - "*test*"
+    #   - "*dev*"
+
+jobs:
+  build:
+    runs-on: ubuntu-20.04
+    container:
+      image: python:3.9
+    steps:
+      - name: Checkout Project
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Install Requirements
+        run: pip install -r docs/requirements.txt
+      - name: Build with MkDocs (validation)
+        run: |
+          mkdocs build --verbose --clean --strict
+          rm -r site/
+      - name: Configure Git
+        if: startsWith(github.ref, 'refs/tags/')
+        id: git
+        run: |
+          git config --global user.name ghaction-k3d.io
+          git config --global user.email ghaction@k3d.io
+          echo ::set-output name=tag::${GITHUB_REF#refs/tags/}
+      - name: Build & Deploy with Mike (versioned)
+        if: startsWith(github.ref, 'refs/tags/')
+        run: |
+          mike deploy --update-aliases --push --rebase ${{ steps.git.outputs.tag }} stable
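Reviewer note: the versioned-docs deploy above can be reproduced locally with the same mike invocation the workflow runs; a minimal sketch, assuming a checkout with gh-pages history and a release tag `v5.0.0`:

    pip install -r docs/requirements.txt                         # pulls in mkdocs and mike, as in the workflow
    mike deploy --update-aliases --push --rebase v5.0.0 stable   # publish v5.0.0 and repoint the `stable` alias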
CHANGELOG.md (79 changed lines)
@@ -1,7 +1,61 @@
 # Changelog
 
+## v5.0.3
+
+### Enhancements & Fixes
+
+- simplified way of getting a Docker API Client that works with Docker Contexts and `DOCKER_*` environment variable configuration (#829, @dragonflylee)
+  - fix: didn't honor `DOCKER_TLS` environment variables before
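Reviewer note: a hypothetical session illustrating what #829 enables; the context name and daemon address are made up:

    # via a Docker context:
    docker context use my-remote   # hypothetical context
    k3d cluster create ctx-test    # k3d now talks to whatever the active context points at

    # ...or via DOCKER_* environment variables (TLS variables honored as of this fix):
    export DOCKER_HOST=tcp://10.0.0.5:2376
    export DOCKER_TLS_VERIFY=1
    k3d cluster create env-test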
+
+## v5.0.2
+
+### Enhancements
+
+- CoreDNS Configmap is now edited in the auto-deploy manifest on disk instead of relying on `kubectl patch` command (#814)
+- refactor: add cmd subcommands in a single function call (#819, @moeryomenko)
+- handle ready-log-messages by type and intent & check them in single log streams instead of checking whole chunks every time (#818)
+
+### Fixes
+
+- fix: config file check failing with env var expansion because unexpanded input file was checked
+
+### Misc
+
+- cleanup: ensure that connections/streams are closed once unused (#818)
+- cleanup: split type definitions across multiple files to increase readability (#818)
+- docs: clarify `node create` help text about cluster reference (#808, @losinggeneration)
+- refactor: move from io/ioutil (deprecated) to io and os packages (#827, @Juneezee)
+
+## v5.0.1
+
+### Enhancement
+
+- add `HostFromClusterNetwork` field to `LocalRegistryHosting` configmap as per KEP-1755 (#754)
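Reviewer note: per KEP-1755 the configmap lives in the `kube-public` namespace, so the new field can be inspected like this (cluster name hypothetical):

    kubectl --context k3d-mycluster -n kube-public get configmap local-registry-hosting -o yaml
    # look for hostFromClusterNetwork under the localRegistryHosting.v1 data key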
+
+### Fixes
+
+- fix: nilpointer exception on failed exec process with no returned logreader
+- make post-create cluster preparation (DNS stuff mostly) more resilient (#780)
+- fix v1alpha2 -> v1alpha3 config migration (and other related issues) (#799)
+
+### Misc
+
+- docs: fix typo (#784)
+- docs: fix usage of legacy `--k3s-agent/server-arg` flag
+
 ## v5.0.0
 
 This release contains a whole lot of new features, breaking changes as well as smaller fixes and improvements.
 The changelog shown here is likely not complete but gives a broad overview over the changes.
 For more details, please check the v5 milestone (<https://github.com/rancher/k3d/milestone/27>) or even the commit history.
 The docs have been updated, so you should also find the information you need there, with more to come!
 
 The demo repository has also been updated to work with k3d v5: <https://github.com/iwilltry42/k3d-demo>.
 
 **Info**: <https://k3d.io> is now versioned, so you can checkout different versions of the documentation by using the dropdown menu in the page title bar!
 
 **Feedback welcome!**
 
 ### Breaking Changes
 
 - new syntax for nodefilters
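Reviewer note: the new nodefilter syntax itself is cut off in this hunk; judging from the flag help texts further down in this diff, usage looks roughly like this (cluster layout hypothetical):

    k3d cluster create --agents 2 \
      --k3s-arg "--disable=traefik@server:0" \
      --runtime-label "my.label@agent:0,1"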
@@ -15,6 +69,7 @@
 - the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default
   - to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag
 - the nodefilter `loadbalancer` will now do the same as `servers:*;agents:*` (proxied via the loadbalancer)
+- flag `--registries-create` transformed from bool flag to string flag: let's you define the name and port-binding of the newly created registry, e.g. `--registry-create myregistry.localhost:5001`
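Reviewer note: the flag change in the last bullet, side by side (the registry name comes from the changelog's own example):

    k3d cluster create mycluster --registry-create                             # v4: plain bool flag
    k3d cluster create mycluster --registry-create myregistry.localhost:5001   # v5: NAME[:HOST][:HOSTPORT]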
 
 ### Fixes
 
@@ -31,6 +86,7 @@
 - updated fork of `confd` to make usage of the file backend including a file watcher for auto-reloads
   - this also checks the config before applying it, so the lb doesn't crash on a faulty config
   - updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards
+- some settings of the loadbalancer can now be configured using `--lb-config-override`, see docs at <https://k3d.io/v5.0.0/design/defaults/#k3d-loadbalancer>
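Reviewer note: a hypothetical use of the new flag; the dotted path follows the linked k3d.io docs, and the key and value here are assumptions, not taken from this diff:

    k3d cluster create lbtest --lb-config-override settings.workerConnections=2048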
 - helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638)
 - concurrently add new nodes to an existing cluster (remove some dumb code) (#640)
 - `--wait` is now the default for `k3d node create`
@@ -52,6 +108,7 @@
 - new config path `options.k3s.extraArgs`
 - config file: environment variables (`$VAR`, `${VAR}` will be expanded unconditionally) (#643)
 - docker context support (#601, @developer-guy & #674)
+- Feature flag using the environment variable `K3D_FIX_DNS` and setting it to a true value (e.g. `export K3D_FIX_DNS=1`) to forward DNS queries to your local machine, e.g. to use your local company DNS
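Reviewer note: a sketch combining the last two bullets. The `$CLUSTER_NAME` below is expanded by k3d itself (unconditionally, per #643), not by the shell, and `K3D_FIX_DNS=1` switches on the DNS-forwarding feature flag; the config fields are the v1alpha3 basics and all values are hypothetical:

    cat > k3d-simple.yaml <<'EOF'
    apiVersion: k3d.io/v1alpha3
    kind: Simple
    name: $CLUSTER_NAME
    servers: 1
    EOF
    export K3D_FIX_DNS=1
    CLUSTER_NAME=demo k3d cluster create --config k3d-simple.yaml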
 
 ### Misc
 
@@ -59,6 +116,28 @@
 - logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640)
 - tests/e2e: add tests for v1alpha2 to v1alpha3 migration
 - docs: use v1alpha3 config version
 - docs: update general appearance and cleanup
 
+## v4.4.8
+
+## Enhancements
+
+- Improved DroneCI Pipeline for Multiarch Images and SemVer Tags (#712)
+  - **Important**: New images will not have the `v` prefix in the tag anymore!
+    - but now real releases will use the "hierarchical" SemVer tags, so you could e.g. subscribe to rancher/k3d-proxy:4 to get v4.x.x images for the proxy container
+
+## Fixes
+
+- clusterCreate: do not override hostIP if hostPort is missing (#693, @lukaszo)
+- imageImport: import all listed images, not only the first one (#701, @mszostok)
+- clusterCreate: when memory constraints are set, only pull the image used for checking the edac folder, if it's not present on the machine
+- fix: update k3d-tools dependencies and use API Version Negotiation, so it still works with older versions of the Docker Engine (#679)
+
+### Misc
+
+- install script: add darwin/arm64 support (#676, @colelawrence)
+- docs: fix go install command (#677, @Rots)
+- docs: add project overview (<https://k3d.io/internals/project/>) (#680)
+
 ## v4.4.7
Dockerfile (14 changed lines)
@@ -3,7 +3,7 @@
 # -> golang image used solely for building the k3d binary #
 # -> built executable can then be copied into other stages #
 ############################################################
-FROM golang:1.16 as builder
+FROM golang:1.17 as builder
 ARG GIT_TAG_OVERRIDE
 WORKDIR /app
 COPY . .
@@ -15,16 +15,20 @@ RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version
 # -> used e.g. in our CI pipelines for testing #
 #######################################################
 FROM docker:20.10-dind as dind
+ARG OS=linux
+ARG ARCH=amd64
 
 # install some basic packages needed for testing, etc.
-RUN apk update && apk add bash curl sudo jq git make netcat-openbsd
+RUN echo "building for ${OS}/${ARCH}" && \
+    apk update && \
+    apk add bash curl sudo jq git make netcat-openbsd
 
 # install kubectl to interact with the k3d cluster
-RUN curl -L https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && \
+RUN curl -L https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/${OS}/${ARCH}/kubectl -o /usr/local/bin/kubectl && \
     chmod +x /usr/local/bin/kubectl
 
 # install yq (yaml processor) from source, as the busybox yq had some issues
-RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_linux_amd64 -o /usr/bin/yq &&\
+RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_${OS}_${ARCH} -o /usr/bin/yq &&\
    chmod +x /usr/bin/yq
 COPY --from=builder /app/bin/k3d /bin/k3d
 
@@ -34,4 +38,4 @@ COPY --from=builder /app/bin/k3d /bin/k3d
 #########################################
 FROM scratch as binary-only
 COPY --from=builder /app/bin/k3d /bin/k3d
-ENTRYPOINT ["/bin/k3d"]
+ENTRYPOINT ["/bin/k3d"]
Makefile (16 changed lines)
@@ -26,8 +26,8 @@ ifeq ($(GIT_TAG),)
 GIT_TAG := $(shell git describe --always)
 endif
 
-# Docker image tag derived from Git tag
-K3D_IMAGE_TAG := $(GIT_TAG)
+# Docker image tag derived from Git tag (with prefix "v" stripped off)
+K3D_IMAGE_TAG := $(GIT_TAG:v%=%)
 
 # get latest k3s version: grep the tag and replace + with - (difference between git and dockerhub tags)
 K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable" | egrep -o '/v[^ ]+"' | sed -E 's/\/|\"//g' | sed -E 's/\+/\-/')
@@ -65,7 +65,7 @@ PKG := $(shell go mod vendor)
 TAGS :=
 TESTS := ./...
 TESTFLAGS :=
-LDFLAGS := -w -s -X github.com/rancher/k3d/v4/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v4/version.K3sVersion=${K3S_TAG}
+LDFLAGS := -w -s -X github.com/rancher/k3d/v5/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v5/version.K3sVersion=${K3S_TAG}
 GCFLAGS :=
 GOFLAGS :=
 BINDIR := $(CURDIR)/bin
@@ -74,7 +74,7 @@ BINARIES := k3d
 # Set version of the k3d helper images for build
 ifneq ($(K3D_HELPER_VERSION),)
 $(info [INFO] Helper Image version set to ${K3D_HELPER_VERSION})
-LDFLAGS += -X github.com/rancher/k3d/v4/version.HelperVersionOverride=${K3D_HELPER_VERSION}
+LDFLAGS += -X github.com/rancher/k3d/v5/version.HelperVersionOverride=${K3D_HELPER_VERSION}
 endif
 
 # Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
@@ -129,10 +129,10 @@ build-docker-%:
 
 # build helper images
 build-helper-images:
-	@echo "Building docker image rancher/k3d-proxy:$(GIT_TAG)"
-	DOCKER_BUILDKIT=1 docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(GIT_TAG)
-	@echo "Building docker image rancher/k3d-tools:$(GIT_TAG)"
-	DOCKER_BUILDKIT=1 docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(GIT_TAG) --build-arg GIT_TAG=$(GIT_TAG)
+	@echo "Building docker image rancher/k3d-proxy:$(K3D_IMAGE_TAG)"
+	DOCKER_BUILDKIT=1 docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(K3D_IMAGE_TAG)
+	@echo "Building docker image rancher/k3d-tools:$(K3D_IMAGE_TAG)"
+	DOCKER_BUILDKIT=1 docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(K3D_IMAGE_TAG) --build-arg GIT_TAG=$(GIT_TAG)
 
 ##############################
 ########## Cleaning ##########
README.md (21 changed lines)
@@ -4,16 +4,16 @@
 [](./LICENSE.md)
 
 
-[](https://pkg.go.dev/github.com/rancher/k3d/v4)
+[](https://pkg.go.dev/github.com/rancher/k3d/v5)
 [](./go.mod)
 [](https://goreportcard.com/report/github.com/rancher/k3d)
 
 <!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
-[](#contributors-)
+[](#contributors-)
 <!-- ALL-CONTRIBUTORS-BADGE:END -->
 [](code_of_conduct.md)
 
-**Please Note:** `main` is now v4.0.0 and the code for v3.x can be found in the `main-v3` branch!
+**Please Note:** `main` is now v5.0.0 and the code for v4.x can be found in the `main-v4` branch!
 
 ## [k3s in docker](https://k3d.io)
 
@@ -21,7 +21,7 @@ k3s is the lightweight Kubernetes distribution by Rancher: [rancher/k3s](https:/
 
 k3d creates containerized k3s clusters. This means, that you can spin up a multi-node k3s cluster on a single machine using docker.
 
-[](https://asciinema.org/a/347570)
+[](https://asciinema.org/a/436420)
 
 ## Learning
 
@@ -35,8 +35,9 @@ k3d creates containerized k3s clusters. This means, that you can spin up a multi
 
 ## Releases
 
-**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!
-**Note**: In January 2021 we upgraded from v3.x.x to **v4.0.0** which includes some breaking changes!
+**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!
+**Note**: In January 2021 we upgraded from v3.x.x to **v4.0.0** which includes some breaking changes!
+**Note**: In September 2021 we upgraded from v4.4.8 to **v5.0.0** which includes some breaking changes!
 
 | Platform | Stage | Version | Release Date | |
 |-----------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|---|
@@ -53,8 +54,8 @@ You have several options there:
   - wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
   - curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
 - use the install script to grab a specific release (via `TAG` environment variable):
-  - wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
-  - curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
+  - wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
+  - curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
 
 - use [Homebrew](https://brew.sh): `brew install k3d` (Homebrew is available for MacOS and Linux)
   - Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
@@ -69,7 +70,7 @@ or...
 
 ## Build
 
-1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v4@main`
+1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v5@main`
 2. Inside the repo run
    - 'make install-tools' to make sure required go packages are installed
 3. Inside the repo run one of the following commands
@@ -139,6 +140,8 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
     <td align="center"><a href="http://wsl.dev"><img src="https://avatars2.githubusercontent.com/u/905874?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Nuno do Carmo</b></sub></a><br /><a href="#content-nunix" title="Content">🖋</a> <a href="#tutorial-nunix" title="Tutorials">✅</a> <a href="#question-nunix" title="Answering Questions">💬</a></td>
     <td align="center"><a href="https://github.com/erwinkersten"><img src="https://avatars0.githubusercontent.com/u/4391121?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Erwin Kersten</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=erwinkersten" title="Documentation">📖</a></td>
     <td align="center"><a href="http://www.alexsears.com"><img src="https://avatars.githubusercontent.com/u/3712883?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Alex Sears</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=searsaw" title="Documentation">📖</a></td>
+    <td align="center"><a href="http://shanduur.github.io"><img src="https://avatars.githubusercontent.com/u/32583062?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Mateusz Urbanek</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=Shanduur" title="Code">💻</a></td>
+    <td align="center"><a href="https://github.com/benjaminjb"><img src="https://avatars.githubusercontent.com/u/4651855?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Benjamin Blattberg</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=benjaminjb" title="Code">💻</a></td>
   </tr>
 </table>
@@ -22,7 +22,8 @@ THE SOFTWARE.
 package cluster
 
 import (
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 
 	"github.com/spf13/cobra"
 )
@@ -36,19 +37,19 @@ func NewCmdCluster() *cobra.Command {
 		Long:  `Manage cluster(s)`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}
 
 	// add subcommands
-	cmd.AddCommand(NewCmdClusterCreate())
-	cmd.AddCommand(NewCmdClusterStart())
-	cmd.AddCommand(NewCmdClusterStop())
-	cmd.AddCommand(NewCmdClusterDelete())
-	cmd.AddCommand(NewCmdClusterList())
-	cmd.AddCommand(NewCmdClusterEdit())
+	cmd.AddCommand(NewCmdClusterCreate(),
+		NewCmdClusterStart(),
+		NewCmdClusterStop(),
+		NewCmdClusterDelete(),
+		NewCmdClusterList(),
+		NewCmdClusterEdit())
 
 	// add flags
@@ -24,28 +24,28 @@ package cluster
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
-	"path/filepath"
 	"runtime"
 	"strconv"
 	"strings"
 	"time"
 
 	"github.com/docker/go-connections/nat"
+	"github.com/sirupsen/logrus"
 
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"gopkg.in/yaml.v2"
 
-	cliutil "github.com/rancher/k3d/v4/cmd/util"
-	k3dCluster "github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/config"
-	conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	"github.com/rancher/k3d/v4/version"
-
-	log "github.com/sirupsen/logrus"
+	cliutil "github.com/rancher/k3d/v5/cmd/util"
+	cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
+	k3dCluster "github.com/rancher/k3d/v5/pkg/client"
+	"github.com/rancher/k3d/v5/pkg/config"
+	conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
+	"github.com/rancher/k3d/v5/version"
 )
 
 var configFile string
@@ -58,74 +58,30 @@ Every cluster will consist of one or more containers:
 	- (optionally) 1 (or more) agent node containers (k3s)
 `
 
-var cfgViper = viper.New()
-var ppViper = viper.New()
+/*
+ * Viper for configuration handling
+ * we use two different instances of Viper here to handle
+ * - cfgViper: "static" configuration
+ * - ppViper: "pre-processed" configuration, where CLI input has to be pre-processed
+ *   to be treated as part of the SImpleConfig
+ */
+var (
+	cfgViper = viper.New()
+	ppViper  = viper.New()
+)
 
-func initConfig() {
+func initConfig() error {
 
 	// Viper for pre-processed config options
 	ppViper.SetEnvPrefix("K3D")
 
-	// viper for the general config (file, env and non pre-processed flags)
-	cfgViper.SetEnvPrefix("K3D")
-	cfgViper.AutomaticEnv()
-
-	cfgViper.SetConfigType("yaml")
-
-	// Set config file, if specified
-	if configFile != "" {
-
-		if _, err := os.Stat(configFile); err != nil {
-			log.Fatalf("Failed to stat config file %s: %+v", configFile, err)
-		}
-
-		// create temporary file to expand environment variables in the config without writing that back to the original file
-		// we're doing it here, because this happens just before absolutely all other processing
-		tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile)))
-		if err != nil {
-			log.Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
-		}
-		defer tmpfile.Close()
-
-		originalcontent, err := ioutil.ReadFile(configFile)
-		if err != nil {
-			log.Fatalf("error reading config file %s: %v", configFile, err)
-		}
-		expandedcontent := os.ExpandEnv(string(originalcontent))
-		if _, err := tmpfile.WriteString(expandedcontent); err != nil {
-			log.Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
-		}
-
-		// use temp file with expanded variables
-		cfgViper.SetConfigFile(tmpfile.Name())
-
-		// try to read config into memory (viper map structure)
-		if err := cfgViper.ReadInConfig(); err != nil {
-			if _, ok := err.(viper.ConfigFileNotFoundError); ok {
-				log.Fatalf("Config file %s not found: %+v", configFile, err)
-			}
-			// config file found but some other error happened
-			log.Fatalf("Failed to read config file %s: %+v", configFile, err)
-		}
-
-		schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
-		if err != nil {
-			log.Fatalf("Cannot validate config file %s: %+v", configFile, err)
-		}
-
-		if err := config.ValidateSchemaFile(configFile, schema); err != nil {
-			log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
-		}
-
-		log.Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
-	}
-	if log.GetLevel() >= log.DebugLevel {
-		c, _ := yaml.Marshal(cfgViper.AllSettings())
-		log.Debugf("Configuration:\n%s", c)
-
-		c, _ = yaml.Marshal(ppViper.AllSettings())
-		log.Debugf("Additional CLI Configuration:\n%s", c)
-	}
+	if l.Log().GetLevel() >= logrus.DebugLevel {
+		c, _ := yaml.Marshal(ppViper.AllSettings())
+		l.Log().Debugf("Additional CLI Configuration:\n%s", c)
+	}
+
+	return cliconfig.InitViperWithConfigFile(cfgViper, configFile)
 }
 
 // NewCmdClusterCreate returns a new cobra command
|
||||
Long: clusterCreateDescription,
|
||||
Args: cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
initConfig()
|
||||
return nil
|
||||
return initConfig()
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
@@ -154,27 +109,27 @@ func NewCmdClusterCreate() *cobra.Command {
 			}
 			cfg, err := config.FromViper(cfgViper)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 
 			if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
-				log.Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
+				l.Log().Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
 				cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
 				if err != nil {
-					log.Fatalln(err)
+					l.Log().Fatalln(err)
 				}
 			}
 
 			simpleCfg := cfg.(conf.SimpleConfig)
 
-			log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)
+			l.Log().Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)
 
 			simpleCfg, err = applyCLIOverrides(simpleCfg)
 			if err != nil {
-				log.Fatalf("Failed to apply CLI overrides: %+v", err)
+				l.Log().Fatalf("Failed to apply CLI overrides: %+v", err)
 			}
 
-			log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)
+			l.Log().Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)
 
 			/**************************************
 			 * Transform, Process & Validate Configuration *
@@ -187,18 +142,18 @@ func NewCmdClusterCreate() *cobra.Command {
 
 			clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, simpleCfg)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
-			log.Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
+			l.Log().Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
 
 			clusterConfig, err = config.ProcessClusterConfig(*clusterConfig)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
-			log.Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
+			l.Log().Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
 
 			if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
-				log.Fatalln("Failed Cluster Configuration Validation: ", err)
+				l.Log().Fatalln("Failed Cluster Configuration Validation: ", err)
 			}
 
 			/**************************************
@@ -207,44 +162,44 @@ func NewCmdClusterCreate() *cobra.Command {
 
 			// check if a cluster with that name exists already
 			if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster); err == nil {
-				log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
+				l.Log().Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
 			}
 
 			// create cluster
 			if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
-				log.Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
+				l.Log().Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
 				clusterConfig.ClusterCreateOpts.WaitForServer = true
 			}
 			//if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
 			if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil {
 				// rollback if creation failed
-				log.Errorln(err)
+				l.Log().Errorln(err)
 				if simpleCfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
-					log.Fatalln("Cluster creation FAILED, rollback deactivated.")
+					l.Log().Fatalln("Cluster creation FAILED, rollback deactivated.")
 				}
 				// rollback if creation failed
-				log.Errorln("Failed to create cluster >>> Rolling Back")
+				l.Log().Errorln("Failed to create cluster >>> Rolling Back")
 				if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, k3d.ClusterDeleteOpts{SkipRegistryCheck: true}); err != nil {
-					log.Errorln(err)
-					log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
+					l.Log().Errorln(err)
+					l.Log().Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
 				}
-				log.Fatalln("Cluster creation FAILED, all changes have been rolled back!")
+				l.Log().Fatalln("Cluster creation FAILED, all changes have been rolled back!")
 			}
-			log.Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
+			l.Log().Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
 
 			/**************
 			 * Kubeconfig *
 			 **************/
 
-			if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
-				log.Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
+			if !clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
+				l.Log().Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
 				clusterConfig.KubeconfigOpts.SwitchCurrentContext = false
 			}
 
 			if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
-				log.Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
+				l.Log().Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
 				if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: simpleCfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
-					log.Warningln(err)
+					l.Log().Warningln(err)
 				}
 			}
@@ -253,7 +208,7 @@ func NewCmdClusterCreate() *cobra.Command {
 			 *****************/
 
 			// print information on how to use the cluster with kubectl
-			log.Infoln("You can now use it like this:")
+			l.Log().Infoln("You can now use it like this:")
 			if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
 				fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, clusterConfig.Cluster.Name))
 			} else if !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
@@ -273,7 +228,7 @@ func NewCmdClusterCreate() *cobra.Command {
 
 	cmd.Flags().StringVarP(&configFile, "config", "c", "", "Path of a config file to use")
 	if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
-		log.Fatalln("Failed to mark flag 'config' as filename flag")
+		l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
 	}
 
 	/***********************
@@ -310,9 +265,12 @@ func NewCmdClusterCreate() *cobra.Command {
 	cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent:0,1\" --runtime-label \"other.label=somevalue@server:0\"`")
 	_ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label"))
 
+	cmd.Flags().String("registry-create", "", "Create a k3d-managed registry and connect it to the cluster (Format: `NAME[:HOST][:HOSTPORT]`\n - Example: `k3d cluster create --registry-create mycluster-registry:0.0.0.0:5432`")
+	_ = ppViper.BindPFlag("cli.registries.create", cmd.Flags().Lookup("registry-create"))
+
 	/* k3s */
 	cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server:0\"")
-	_ = cfgViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg"))
+	_ = ppViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg"))
 
 	/******************
 	 * "Normal" Flags *
@@ -362,9 +320,6 @@ func NewCmdClusterCreate() *cobra.Command {
 	cmd.Flags().Bool("no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
 	_ = cfgViper.BindPFlag("options.k3d.disablerollback", cmd.Flags().Lookup("no-rollback"))
 
-	cmd.Flags().Bool("no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
-	_ = cfgViper.BindPFlag("options.k3d.disablehostipinjection", cmd.Flags().Lookup("no-hostip"))
-
 	cmd.Flags().String("gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
 	_ = cfgViper.BindPFlag("options.runtime.gpurequest", cmd.Flags().Lookup("gpus"))
 
@@ -382,15 +337,16 @@ func NewCmdClusterCreate() *cobra.Command {
 	cmd.Flags().StringArray("registry-use", nil, "Connect to one or more k3d-managed registries running locally")
 	_ = cfgViper.BindPFlag("registries.use", cmd.Flags().Lookup("registry-use"))
 
-	cmd.Flags().Bool("registry-create", false, "Create a k3d-managed registry and connect it to the cluster")
-	_ = cfgViper.BindPFlag("registries.create", cmd.Flags().Lookup("registry-create"))
-
 	cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file")
 	_ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config"))
 	if err := cmd.MarkFlagFilename("registry-config", "yaml", "yml"); err != nil {
-		log.Fatalln("Failed to mark flag 'config' as filename flag")
+		l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
 	}
 
+	/* Loadbalancer / Proxy */
+	cmd.Flags().StringSlice("lb-config-override", nil, "Use dotted YAML path syntax to override nginx loadbalancer settings")
+	_ = cfgViper.BindPFlag("options.k3d.loadbalancer.configoverrides", cmd.Flags().Lookup("lb-config-override"))
+
 	/* Subcommands */
 
 	// done
@ -424,20 +380,25 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
|
||||
// Overwrite if cli arg is set
|
||||
if ppViper.IsSet("cli.api-port") {
|
||||
if cfg.ExposeAPI.HostPort != "" {
|
||||
log.Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
|
||||
l.Log().Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
|
||||
}
|
||||
exposeAPI, err = cliutil.ParsePortExposureSpec(ppViper.GetString("cli.api-port"), k3d.DefaultAPIPort)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
return cfg, fmt.Errorf("failed to parse API Port spec: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set to random port if port is empty string
|
||||
if len(exposeAPI.Binding.HostPort) == 0 {
|
||||
exposeAPI, err = cliutil.ParsePortExposureSpec("random", k3d.DefaultAPIPort)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
var freePort string
|
||||
port, err := cliutil.GetFreePort()
|
||||
freePort = strconv.Itoa(port)
|
||||
if err != nil || port == 0 {
|
||||
l.Log().Warnf("Failed to get random free port: %+v", err)
|
||||
l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", k3d.DefaultAPIPort)
|
||||
freePort = k3d.DefaultAPIPort
|
||||
}
|
||||
exposeAPI.Binding.HostPort = freePort
|
||||
 	}

+	cfg.ExposeAPI = conf.SimpleExposureOpts{

@@ -454,11 +415,11 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		// split node filter from the specified volume
 		volume, filters, err := cliutil.SplitFiltersFromFlag(volumeFlag)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}

-		if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
-			log.Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
+		if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create != nil || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
+			l.Log().Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
 		}

 		// create new entry or append filter to existing entry
@@ -476,7 +437,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		})
 	}

-	log.Tracef("VolumeFilterMap: %+v", volumeFilterMap)
+	l.Log().Tracef("VolumeFilterMap: %+v", volumeFilterMap)

 	// -> PORTS
 	portFilterMap := make(map[string][]string, 1)
@@ -484,12 +445,12 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		// split node filter from the specified volume
 		portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}

 		// create new entry or append filter to existing entry
 		if _, exists := portFilterMap[portmap]; exists {
-			log.Fatalln("Same Portmapping can not be used for multiple nodes")
+			l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
 		} else {
 			portFilterMap[portmap] = filters
 		}
@@ -502,7 +463,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		})
 	}

-	log.Tracef("PortFilterMap: %+v", portFilterMap)
+	l.Log().Tracef("PortFilterMap: %+v", portFilterMap)

 	// --k3s-node-label
 	// k3sNodeLabelFilterMap will add k3s node label to applied node filters
@@ -512,7 +473,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		// split node filter from the specified label
 		label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}

 		// create new entry or append filter to existing entry
@@ -530,7 +491,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		})
 	}

-	log.Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)
+	l.Log().Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)

 	// --runtime-label
 	// runtimeLabelFilterMap will add container runtime label to applied node filters
@@ -540,7 +501,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		// split node filter from the specified label
 		label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}

 		cliutil.ValidateRuntimeLabelKey(strings.Split(label, "=")[0])
@@ -560,7 +521,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		})
 	}

-	log.Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
+	l.Log().Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)

 	// --env
 	// envFilterMap will add container env vars to applied node filters
@@ -570,7 +531,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		// split node filter from the specified env var
 		env, filters, err := cliutil.SplitFiltersFromFlag(envFlag)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}

 		// create new entry or append filter to existing entry
@@ -588,7 +549,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		})
 	}

-	log.Tracef("EnvFilterMap: %+v", envFilterMap)
+	l.Log().Tracef("EnvFilterMap: %+v", envFilterMap)

 	// --k3s-arg
 	argFilterMap := make(map[string][]string, 1)
@@ -597,7 +558,7 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		// split node filter from the specified arg
 		arg, filters, err := cliutil.SplitFiltersFromFlag(argFlag)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}

 		// create new entry or append filter to existing entry
@@ -615,5 +576,24 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
 		})
 	}

+	// --registry-create
+	if ppViper.IsSet("cli.registries.create") {
+		flagvalue := ppViper.GetString("cli.registries.create")
+		fvSplit := strings.SplitN(flagvalue, ":", 2)
+		if cfg.Registries.Create == nil {
+			cfg.Registries.Create = &conf.SimpleConfigRegistryCreateConfig{}
+		}
+		cfg.Registries.Create.Name = fvSplit[0]
+		if len(fvSplit) > 1 {
+			exposeAPI, err = cliutil.ParsePortExposureSpec(fvSplit[1], "1234") // internal port is unused after all
+			if err != nil {
+				return cfg, fmt.Errorf("failed to registry port spec: %w", err)
+			}
+			cfg.Registries.Create.Host = exposeAPI.Host
+			cfg.Registries.Create.HostPort = exposeAPI.Binding.HostPort
+		}
+
+	}

 	return cfg, nil
 }
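Note: the recurring log.* to l.Log().* replacements throughout these hunks migrate every command from a package-global logrus import to a shared logger package aliased as l. That package itself is not part of this changeset; the following is a minimal sketch of what it might look like, assuming it simply wraps one logrus instance behind an accessor (names inferred from the l.Log() call sites):

package logger

import "github.com/sirupsen/logrus"

// One process-wide logger instance, so commands no longer import logrus directly.
var log = logrus.New()

// Log returns the shared logger used across the CLI.
func Log() *logrus.Logger {
	return log
}

Centralizing the instance this way lets formatting, level, and output be configured once instead of per command.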
@@ -26,16 +26,21 @@ import (
 	"os"
 	"path"

-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	k3dutil "github.com/rancher/k3d/v4/pkg/util"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
+	k3dutil "github.com/rancher/k3d/v5/pkg/util"

 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )

+var clusterDeleteConfigFile string
+var clusterDeleteCfgViper = viper.New()
+
 // NewCmdClusterDelete returns a new cobra command
 func NewCmdClusterDelete() *cobra.Command {

@@ -47,35 +52,38 @@ func NewCmdClusterDelete() *cobra.Command {
 		Long:              `Delete cluster(s).`,
 		Args:              cobra.MinimumNArgs(0), // 0 or n arguments; 0 = default cluster name
 		ValidArgsFunction: util.ValidArgsAvailableClusters,
+		PreRunE: func(cmd *cobra.Command, args []string) error {
+			return cliconfig.InitViperWithConfigFile(clusterDeleteCfgViper, clusterDeleteConfigFile)
+		},
 		Run: func(cmd *cobra.Command, args []string) {
 			clusters := parseDeleteClusterCmd(cmd, args)

 			if len(clusters) == 0 {
-				log.Infoln("No clusters found")
+				l.Log().Infoln("No clusters found")
 			} else {
 				for _, c := range clusters {
 					if err := client.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c, k3d.ClusterDeleteOpts{SkipRegistryCheck: false}); err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
-					log.Infoln("Removing cluster details from default kubeconfig...")
+					l.Log().Infoln("Removing cluster details from default kubeconfig...")
 					if err := client.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
-						log.Warnln("Failed to remove cluster details from default kubeconfig")
-						log.Warnln(err)
+						l.Log().Warnln("Failed to remove cluster details from default kubeconfig")
+						l.Log().Warnln(err)
 					}
-					log.Infoln("Removing standalone kubeconfig file (if there is one)...")
+					l.Log().Infoln("Removing standalone kubeconfig file (if there is one)...")
 					configDir, err := k3dutil.GetConfigDirOrCreate()
 					if err != nil {
-						log.Warnf("Failed to delete kubeconfig file: %+v", err)
+						l.Log().Warnf("Failed to delete kubeconfig file: %+v", err)
 					} else {
 						kubeconfigfile := path.Join(configDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
 						if err := os.Remove(kubeconfigfile); err != nil {
 							if !os.IsNotExist(err) {
-								log.Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
+								l.Log().Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
 							}
 						}
 					}

-					log.Infof("Successfully deleted cluster %s!", c.Name)
+					l.Log().Infof("Successfully deleted cluster %s!", c.Name)
 				}
 			}

@@ -87,6 +95,15 @@ func NewCmdClusterDelete() *cobra.Command {
 	// add flags
 	cmd.Flags().BoolP("all", "a", false, "Delete all existing clusters")

+	/***************
+	 * Config File *
+	 ***************/
+
+	cmd.Flags().StringVarP(&clusterDeleteConfigFile, "config", "c", "", "Path of a config file to use")
+	if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
+		l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
+	}
+
 	// done
 	return cmd
 }
@@ -94,20 +111,45 @@ func NewCmdClusterDelete() *cobra.Command {
 // parseDeleteClusterCmd parses the command input into variables required to delete clusters
 func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {

-	// --all
 	var clusters []*k3d.Cluster

-	if all, err := cmd.Flags().GetBool("all"); err != nil {
-		log.Fatalln(err)
-	} else if all {
-		log.Infoln("Deleting all clusters...")
+	// --all
+	all, err := cmd.Flags().GetBool("all")
+	if err != nil {
+		l.Log().Fatalln(err)
+	}
+
+	// --config
+	if clusterDeleteConfigFile != "" {
+		// not allowed with --all or more args
+		if len(args) > 0 || all {
+			l.Log().Fatalln("failed to delete cluster: cannot use `--config` flag with additional arguments or `--all`")
+		}
+
+		if clusterDeleteCfgViper.GetString("name") == "" {
+			l.Log().Fatalln("failed to delete cluster via config file: no name in config file")
+		}
+
+		c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterDeleteCfgViper.GetString("name")})
+		if err != nil {
+			l.Log().Fatalf("failed to delete cluster '%s': %v", clusterDeleteCfgViper.GetString("name"), err)
+		}

+		clusters = append(clusters, c)
+		return clusters
+	}
+
+	// --all was set
+	if all {
+		l.Log().Infoln("Deleting all clusters...")
 		clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		return clusters
 	}

 	// args only
 	clusternames := []string{k3d.DefaultClusterName}
 	if len(args) != 0 {
 		clusternames = args
@@ -119,7 +161,7 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
 			if err == client.ClusterGetNoNodesFoundError {
 				continue
 			}
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		clusters = append(clusters, c)
 	}
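The PreRunE hook above wires a per-command viper instance to the new --config flag before Run executes. InitViperWithConfigFile is referenced but not shown in this diff; a self-contained sketch of what such a helper is assumed to do:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// initViperWithConfigFile points a viper instance at a config file (if one was
// passed) and reads it into memory, mirroring the assumed helper above.
func initViperWithConfigFile(v *viper.Viper, configFile string) error {
	if configFile == "" {
		return nil // no --config flag given, nothing to load
	}
	v.SetConfigFile(configFile)
	if err := v.ReadInConfig(); err != nil {
		return fmt.Errorf("failed to read config file %s: %w", configFile, err)
	}
	return nil
}

func main() {
	v := viper.New()
	if err := initViperWithConfigFile(v, "k3d-cluster.yaml"); err != nil {
		fmt.Println(err)
		return
	}
	// parseDeleteClusterCmd then reads the cluster name via v.GetString("name")
	fmt.Println("cluster name from config:", v.GetString("name"))
}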
@@ -22,13 +22,13 @@ THE SOFTWARE.
 package cluster

 import (
-	"github.com/rancher/k3d/v4/cmd/util"
-	cliutil "github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	cliutil "github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 )

@@ -47,13 +47,13 @@ func NewCmdClusterEdit() *cobra.Command {

 			existingCluster, changeset := parseEditClusterCmd(cmd, args)

-			log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)
+			l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)

 			if err := client.ClusterEditChangesetSimple(cmd.Context(), runtimes.SelectedRuntime, existingCluster, changeset); err != nil {
-				log.Fatalf("Failed to update the cluster: %v", err)
+				l.Log().Fatalf("Failed to update the cluster: %v", err)
 			}

-			log.Infof("Successfully updated %s", existingCluster.Name)
+			l.Log().Infof("Successfully updated %s", existingCluster.Name)

 		},
 	}
@@ -72,11 +72,11 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf

 	existingCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: args[0]})
 	if err != nil {
-		log.Fatalln(err)
+		l.Log().Fatalln(err)
 	}

 	if existingCluster == nil {
-		log.Infof("Cluster %s not found", args[0])
+		l.Log().Infof("Cluster %s not found", args[0])
 		return nil, nil
 	}

@@ -87,7 +87,7 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
 	 */
 	portFlags, err := cmd.Flags().GetStringArray("port-add")
 	if err != nil {
-		log.Errorln(err)
+		l.Log().Errorln(err)
 		return nil, nil
 	}

@@ -100,12 +100,12 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
 		// split node filter from the specified volume
 		portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}

 		// create new entry or append filter to existing entry
 		if _, exists := portFilterMap[portmap]; exists {
-			log.Fatalln("Same Portmapping can not be used for multiple nodes")
+			l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
 		} else {
 			portFilterMap[portmap] = filters
 		}
@@ -118,7 +118,7 @@ func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf
 		})
 	}

-	log.Tracef("PortFilterMap: %+v", portFilterMap)
+	l.Log().Tracef("PortFilterMap: %+v", portFilterMap)

 	return existingCluster, &changeset
 }
@@ -28,15 +28,14 @@ import (
 	"os"
 	"strings"

-	"github.com/rancher/k3d/v4/cmd/util"
-	k3cluster "github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
+	"github.com/rancher/k3d/v5/cmd/util"
+	k3cluster "github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 	"gopkg.in/yaml.v2"

-	log "github.com/sirupsen/logrus"
-
 	"github.com/liggitt/tabwriter"
 )

@@ -83,14 +82,14 @@ func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {
 		// cluster name not specified : get all clusters
 		clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 	} else {
 		for _, clusterName := range args {
 			// cluster name specified : get specific cluster
 			retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 			clusters = append(clusters, retrievedCluster)
 		}
@@ -126,7 +125,7 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
 		}
 		_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
 		if err != nil {
-			log.Fatalln("Failed to print headers")
+			l.Log().Fatalln("Failed to print headers")
 		}
 	}
 }
@@ -24,21 +24,22 @@ package cluster
 import (
 	"time"

-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	"github.com/rancher/k3d/v4/pkg/types"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	"github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"

-	k3d "github.com/rancher/k3d/v4/pkg/types"
-
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 )

 // NewCmdClusterStart returns a new cobra command
 func NewCmdClusterStart() *cobra.Command {

-	startClusterOpts := types.ClusterStartOpts{}
+	startClusterOpts := types.ClusterStartOpts{
+		Intent: k3d.IntentClusterStart,
+	}

 	// create new command
 	cmd := &cobra.Command{
@@ -49,12 +50,18 @@ func NewCmdClusterStart() *cobra.Command {
 		Run: func(cmd *cobra.Command, args []string) {
 			clusters := parseStartClusterCmd(cmd, args)
 			if len(clusters) == 0 {
-				log.Infoln("No clusters found")
+				l.Log().Infoln("No clusters found")
 			} else {
 				for _, c := range clusters {
-					if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
-						log.Fatalln(err)
+					envInfo, err := client.GatherEnvironmentInfo(cmd.Context(), runtimes.SelectedRuntime, c)
+					if err != nil {
+						l.Log().Fatalf("failed to gather info about cluster environment: %v", err)
 					}
+					startClusterOpts.EnvironmentInfo = envInfo
+					if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
+						l.Log().Fatalln(err)
+					}
+					l.Log().Infof("Started cluster '%s'", c.Name)
 				}
 			}
 		},
@@ -77,11 +84,11 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
 	var clusters []*k3d.Cluster

 	if all, err := cmd.Flags().GetBool("all"); err != nil {
-		log.Fatalln(err)
+		l.Log().Fatalln(err)
 	} else if all {
 		clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		return clusters
 	}
@@ -94,7 +101,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
 	for _, name := range clusternames {
 		cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		clusters = append(clusters, cluster)
 	}
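The start hunks use two identifiers that this diff never defines: k3d.IntentClusterStart and the value returned by client.GatherEnvironmentInfo. An assumed sketch of the shapes involved, with illustrative field names only (the real definitions live in pkg/types and are not shown here):

package types

import "net"

// Intent marks why a cluster operation was started (assumed string enum).
type Intent string

const IntentClusterStart Intent = "cluster-start"

// EnvironmentInfo carries runtime details gathered before start; the
// HostGateway field is a hypothetical example, not taken from this diff.
type EnvironmentInfo struct {
	HostGateway net.IP
}

// ClusterStartOpts as used above: an intent plus pre-gathered environment info.
type ClusterStartOpts struct {
	Intent          Intent
	EnvironmentInfo *EnvironmentInfo
}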
@@ -24,12 +24,11 @@ package cluster
 import (
 	"github.com/spf13/cobra"

-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 )

 // NewCmdClusterStop returns a new cobra command
@@ -44,11 +43,11 @@ func NewCmdClusterStop() *cobra.Command {
 		Run: func(cmd *cobra.Command, args []string) {
 			clusters := parseStopClusterCmd(cmd, args)
 			if len(clusters) == 0 {
-				log.Infoln("No clusters found")
+				l.Log().Infoln("No clusters found")
 			} else {
 				for _, c := range clusters {
 					if err := client.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
 				}
 			}
@@ -70,11 +69,11 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
 	var clusters []*k3d.Cluster

 	if all, err := cmd.Flags().GetBool("all"); err != nil {
-		log.Fatalln(err)
+		l.Log().Fatalln(err)
 	} else if all {
 		clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		return clusters
 	}
@@ -87,7 +86,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
 	for _, name := range clusternames {
 		cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		clusters = append(clusters, cluster)
 	}
@@ -22,7 +22,8 @@ THE SOFTWARE.
 package config

 import (
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+
 	"github.com/spf13/cobra"
 )

@@ -34,14 +35,13 @@ func NewCmdConfig() *cobra.Command {
 		Long: `Work with config file(s)`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}

-	cmd.AddCommand(NewCmdConfigInit())
-	cmd.AddCommand(NewCmdConfigMigrate())
+	cmd.AddCommand(NewCmdConfigInit(), NewCmdConfigMigrate())

 	return cmd
 }
@@ -25,8 +25,8 @@ import (
 	"fmt"
 	"os"

-	config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
-	log "github.com/sirupsen/logrus"
+	config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 	"github.com/spf13/cobra"
 )

@@ -39,7 +39,7 @@ func NewCmdConfigInit() *cobra.Command {
 		Use:     "init",
 		Aliases: []string{"create"},
 		Run: func(cmd *cobra.Command, args []string) {
-			log.Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
+			l.Log().Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
 			if output == "-" {
 				fmt.Println(config.DefaultConfig)
 			} else {
@@ -51,16 +51,16 @@ func NewCmdConfigInit() *cobra.Command {
 					// create/overwrite file
 					file, err = os.Create(output)
 					if err != nil {
-						log.Fatalf("Failed to create/overwrite output file: %s", err)
+						l.Log().Fatalf("Failed to create/overwrite output file: %s", err)
 					}
 					// write content
 					if _, err = file.WriteString(config.DefaultConfig); err != nil {
-						log.Fatalf("Failed to write to output file: %+v", err)
+						l.Log().Fatalf("Failed to write to output file: %+v", err)
 					}
 				} else if err != nil {
-					log.Fatalf("Failed to stat output file: %+v", err)
+					l.Log().Fatalf("Failed to stat output file: %+v", err)
 				} else {
-					log.Errorln("Output file exists and --force was not set")
+					l.Log().Errorln("Output file exists and --force was not set")
 					os.Exit(1)
 				}
 			}
@@ -69,7 +69,7 @@ func NewCmdConfigInit() *cobra.Command {

 	cmd.Flags().StringVarP(&output, "output", "o", "k3d-default.yaml", "Write a default k3d config")
 	if err := cmd.MarkFlagFilename("output", "yaml", "yml"); err != nil {
-		log.Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
+		l.Log().Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
 	}
 	cmd.Flags().BoolVarP(&force, "force", "f", false, "Force overwrite of target file")
@@ -25,8 +25,8 @@ import (
 	"os"
 	"strings"

-	"github.com/rancher/k3d/v4/pkg/config"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/pkg/config"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"gopkg.in/yaml.v2"
@@ -44,7 +44,7 @@ func NewCmdConfigMigrate() *cobra.Command {
 			configFile := args[0]

 			if _, err := os.Stat(configFile); err != nil {
-				log.Fatalf("Failed to stat config file %s: %+v", configFile, err)
+				l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
 			}

 			cfgViper := viper.New()
@@ -55,38 +55,38 @@ func NewCmdConfigMigrate() *cobra.Command {
 			// try to read config into memory (viper map structure)
 			if err := cfgViper.ReadInConfig(); err != nil {
 				if _, ok := err.(viper.ConfigFileNotFoundError); ok {
-					log.Fatalf("Config file %s not found: %+v", configFile, err)
+					l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
 				}
 				// config file found but some other error happened
-				log.Fatalf("Failed to read config file %s: %+v", configFile, err)
+				l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
 			}

 			schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
 			if err != nil {
-				log.Fatalf("Cannot validate config file %s: %+v", configFile, err)
+				l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
 			}

 			if err := config.ValidateSchemaFile(configFile, schema); err != nil {
-				log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
+				l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
 			}

-			log.Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
+			l.Log().Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))

 			cfg, err := config.FromViper(cfgViper)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}

 			if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
 				cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
 				if err != nil {
-					log.Fatalln(err)
+					l.Log().Fatalln(err)
 				}
 			}

 			yamlout, err := yaml.Marshal(cfg)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}

 			output := "-"
@@ -97,11 +97,11 @@ func NewCmdConfigMigrate() *cobra.Command {

 			if output == "-" {
 				if _, err := os.Stdout.Write(yamlout); err != nil {
-					log.Fatalln(err)
+					l.Log().Fatalln(err)
 				}
 			} else {
-				if err := os.WriteFile(output, yamlout, os.ModeAppend); err != nil {
-					log.Fatalln(err)
+				if err := os.WriteFile(output, yamlout, os.ModePerm); err != nil {
+					l.Log().Fatalln(err)
 				}
 			}
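The last hunk above also fixes a real bug: os.ModeAppend is a file-attribute flag with zero permission bits, so a file newly created by os.WriteFile with that mode ends up with mode 0000 and may be unreadable afterwards. os.ModePerm (0777) passes plain permission bits instead. A minimal standalone illustration:

package main

import (
	"log"
	"os"
)

func main() {
	data := []byte("apiVersion: k3d.io/v1alpha3\n")
	// os.ModeAppend carries no permission bits, so a file *created* with it
	// gets mode 0000. os.ModePerm (0777) works; a stricter 0o644 would be
	// the more conventional choice for a config file.
	if err := os.WriteFile("migrated.yaml", data, os.ModePerm); err != nil {
		log.Fatalln(err)
	}
}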
@@ -24,11 +24,11 @@ package debug
 import (
 	"fmt"

-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	"github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	"github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 	"gopkg.in/yaml.v2"
 )
@@ -42,8 +42,8 @@ func NewCmdDebug() *cobra.Command {
 		Long: `Debug k3d cluster(s)`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}
@@ -61,8 +61,8 @@ func NewCmdDebugLoadbalancer() *cobra.Command {
 		Long: `Debug the loadbalancer`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}
@@ -74,16 +74,16 @@ func NewCmdDebugLoadbalancer() *cobra.Command {
 		Run: func(cmd *cobra.Command, args []string) {
 			c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]})
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}

 			lbconf, err := client.GetLoadbalancerConfig(cmd.Context(), runtimes.SelectedRuntime, c)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 			yamlized, err := yaml.Marshal(lbconf)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 			fmt.Println(string(yamlized))
 		},
@@ -22,7 +22,7 @@ THE SOFTWARE.
 package image

 import (
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 	"github.com/spf13/cobra"
 )

@@ -37,8 +37,8 @@ func NewCmdImage() *cobra.Command {
 		Long: `Handle container images.`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}
@@ -26,12 +26,11 @@ import (

 	"github.com/spf13/cobra"

-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	"github.com/rancher/k3d/v4/pkg/tools"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 )

 // NewCmdImageImport returns a new cobra command
@@ -60,20 +59,20 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
 		Args: cobra.MinimumNArgs(1),
 		Run: func(cmd *cobra.Command, args []string) {
 			images, clusters := parseLoadImageCmd(cmd, args)
-			log.Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
+			l.Log().Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
 			errOccured := false
 			for _, cluster := range clusters {
-				log.Infof("Importing image(s) into cluster '%s'", cluster.Name)
-				if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
-					log.Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
+				l.Log().Infof("Importing image(s) into cluster '%s'", cluster.Name)
+				if err := client.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
+					l.Log().Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
 					errOccured = true
 				}
 			}
 			if errOccured {
-				log.Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
+				l.Log().Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
 				os.Exit(1)
 			}
-			log.Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
+			l.Log().Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
 		},
 	}

@@ -82,7 +81,7 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
 	 *********/
 	cmd.Flags().StringArrayP("cluster", "c", []string{k3d.DefaultClusterName}, "Select clusters to load the image to.")
 	if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
-		log.Fatalln("Failed to register flag completion for '--cluster'", err)
+		l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
 	}

 	cmd.Flags().BoolVarP(&loadImageOpts.KeepTar, "keep-tarball", "k", false, "Do not delete the tarball containing the saved images from the shared volume")
@@ -100,7 +99,7 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
 	// --cluster
 	clusterNames, err := cmd.Flags().GetStringArray("cluster")
 	if err != nil {
-		log.Fatalln(err)
+		l.Log().Fatalln(err)
 	}
 	clusters := []k3d.Cluster{}
 	for _, clusterName := range clusterNames {
@@ -110,7 +109,7 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
 	// images
 	images := args
 	if len(images) == 0 {
-		log.Fatalln("No images specified!")
+		l.Log().Fatalln("No images specified!")
 	}

 	return images, clusters
@@ -22,7 +22,7 @@ THE SOFTWARE.
 package kubeconfig

 import (
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 	"github.com/spf13/cobra"
 )

@@ -36,15 +36,14 @@ func NewCmdKubeconfig() *cobra.Command {
 		Long: `Manage kubeconfig(s)`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}

 	// add subcommands
-	cmd.AddCommand(NewCmdKubeconfigGet())
-	cmd.AddCommand(NewCmdKubeconfigMerge())
+	cmd.AddCommand(NewCmdKubeconfigGet(), NewCmdKubeconfigMerge())

 	// add flags
@@ -25,13 +25,12 @@ import (
 	"fmt"
 	"os"

-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
-
-	log "github.com/sirupsen/logrus"
 )

 type getKubeconfigFlags struct {
@@ -70,13 +69,13 @@ func NewCmdKubeconfigGet() *cobra.Command {
 			if getKubeconfigFlags.all {
 				clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
 				if err != nil {
-					log.Fatalln(err)
+					l.Log().Fatalln(err)
 				}
 			} else {
 				for _, clusterName := range args {
 					retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
 					if err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
 					clusters = append(clusters, retrievedCluster)
 				}
@@ -85,10 +84,10 @@ func NewCmdKubeconfigGet() *cobra.Command {
 			// get kubeconfigs from all clusters
 			errorGettingKubeconfig := false
 			for _, c := range clusters {
-				log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
+				l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
 				fmt.Println("---") // YAML document separator
 				if _, err := client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
-					log.Errorln(err)
+					l.Log().Errorln(err)
 					errorGettingKubeconfig = true
 				}
 			}
@@ -27,15 +27,14 @@ import (
 	"path"
 	"strings"

-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	k3dutil "github.com/rancher/k3d/v4/pkg/util"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
+	k3dutil "github.com/rancher/k3d/v5/pkg/util"
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/tools/clientcmd"
-
-	log "github.com/sirupsen/logrus"
 )

 type mergeKubeconfigFlags struct {
@@ -64,14 +63,14 @@ func NewCmdKubeconfigMerge() *cobra.Command {
 			var err error

 			if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
-				log.Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
+				l.Log().Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
 			}

 			// generate list of clusters
 			if mergeKubeconfigFlags.all {
 				clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
 				if err != nil {
-					log.Fatalln(err)
+					l.Log().Fatalln(err)
 				}
 			} else {

@@ -83,7 +82,7 @@ func NewCmdKubeconfigMerge() *cobra.Command {
 				for _, clusterName := range clusternames {
 					retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
 					if err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
 					clusters = append(clusters, retrievedCluster)
 				}
@@ -94,18 +93,18 @@ func NewCmdKubeconfigMerge() *cobra.Command {
 			var outputs []string
 			outputDir, err := k3dutil.GetConfigDirOrCreate()
 			if err != nil {
-				log.Errorln(err)
-				log.Fatalln("Failed to save kubeconfig to local directory")
+				l.Log().Errorln(err)
+				l.Log().Fatalln("Failed to save kubeconfig to local directory")
 			}
 			for _, c := range clusters {
-				log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
+				l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
 				output := mergeKubeconfigFlags.output
 				if output == "" && !mergeKubeconfigFlags.targetDefault {
 					output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
 				}
 				output, err = client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
 				if err != nil {
-					log.Errorln(err)
+					l.Log().Errorln(err)
 					errorGettingKubeconfig = true
 				} else {
 					outputs = append(outputs, output)
@@ -127,7 +126,7 @@ func NewCmdKubeconfigMerge() *cobra.Command {
 	// add flags
 	cmd.Flags().StringVarP(&mergeKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
 	if err := cmd.MarkFlagFilename("output"); err != nil {
-		log.Fatalln("Failed to mark flag --output as filename")
+		l.Log().Fatalln("Failed to mark flag --output as filename")
 	}
 	cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "kubeconfig-merge-default", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
 	cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
@@ -22,7 +22,7 @@ THE SOFTWARE.
 package node

 import (
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 	"github.com/spf13/cobra"
 )

@@ -36,19 +36,19 @@ func NewCmdNode() *cobra.Command {
 		Long: `Manage node(s)`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}

 	// add subcommands
-	cmd.AddCommand(NewCmdNodeCreate())
-	cmd.AddCommand(NewCmdNodeStart())
-	cmd.AddCommand(NewCmdNodeStop())
-	cmd.AddCommand(NewCmdNodeDelete())
-	cmd.AddCommand(NewCmdNodeList())
-	cmd.AddCommand(NewCmdNodeEdit())
+	cmd.AddCommand(NewCmdNodeCreate(),
+		NewCmdNodeStart(),
+		NewCmdNodeStop(),
+		NewCmdNodeDelete(),
+		NewCmdNodeList(),
+		NewCmdNodeEdit())

 	// add flags
@@ -29,13 +29,13 @@ import (
 	"github.com/spf13/cobra"

 	dockerunits "github.com/docker/go-units"
-	"github.com/rancher/k3d/v4/cmd/util"
-	cliutil "github.com/rancher/k3d/v4/cmd/util"
-	k3dc "github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	"github.com/rancher/k3d/v4/version"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	cliutil "github.com/rancher/k3d/v5/cmd/util"
+	k3dc "github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
+	"github.com/rancher/k3d/v5/version"
 )

 // NewCmdNodeCreate returns a new cobra command
@@ -50,12 +50,19 @@ func NewCmdNodeCreate() *cobra.Command {
 		Long: `Create a new containerized k3s node (k3s in docker).`,
 		Args: cobra.ExactArgs(1), // exactly one name accepted // TODO: if not specified, inherit from cluster that the node shall belong to, if that is specified
 		Run: func(cmd *cobra.Command, args []string) {
-			nodes, cluster := parseCreateNodeCmd(cmd, args)
-			if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
-				log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
-				log.Fatalln(err)
+			nodes, clusterName := parseCreateNodeCmd(cmd, args)
+			if strings.HasPrefix(clusterName, "https://") {
+				l.Log().Infof("Adding %d node(s) to the remote cluster '%s'...", len(nodes), clusterName)
+				if err := k3dc.NodeAddToClusterMultiRemote(cmd.Context(), runtimes.SelectedRuntime, nodes, clusterName, createNodeOpts); err != nil {
+					l.Log().Fatalf("failed to add %d node(s) to the remote cluster '%s': %v", len(nodes), clusterName, err)
+				}
+			} else {
+				l.Log().Infof("Adding %d node(s) to the runtime local cluster '%s'...", len(nodes), clusterName)
+				if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, &k3d.Cluster{Name: clusterName}, createNodeOpts); err != nil {
+					l.Log().Fatalf("failed to add %d node(s) to the runtime local cluster '%s': %v", len(nodes), clusterName, err)
+				}
 			}
-			log.Infof("Successfully created %d node(s)!", len(nodes))
+			l.Log().Infof("Successfully created %d node(s)!", len(nodes))
 		},
 	}

@@ -63,11 +70,11 @@ func NewCmdNodeCreate() *cobra.Command {
 	cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
 	cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
 	if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
-		log.Fatalln("Failed to register flag completion for '--role'", err)
+		l.Log().Fatalln("Failed to register flag completion for '--role'", err)
 	}
-	cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Select the cluster that the node shall connect to.")
+	cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Cluster URL or k3d cluster name to connect to.")
 	if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
-		log.Fatalln("Failed to register flag completion for '--cluster'", err)
+		l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
 	}

 	cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
@@ -79,69 +86,70 @@ func NewCmdNodeCreate() *cobra.Command {
 	cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"")
 	cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"")

+	cmd.Flags().StringSliceP("network", "n", []string{}, "Add node to (another) runtime network")
+
 	cmd.Flags().StringVarP(&createNodeOpts.ClusterToken, "token", "t", "", "Override cluster token (required when connecting to an external cluster)")

 	// done
 	return cmd
 }

-// parseCreateNodeCmd parses the command input into variables required to create a cluster
-func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cluster) {
+// parseCreateNodeCmd parses the command input into variables required to create a node
+func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, string) {

 	// --replicas
 	replicas, err := cmd.Flags().GetInt("replicas")
 	if err != nil {
-		log.Errorln("No replica count specified")
-		log.Fatalln(err)
+		l.Log().Errorln("No replica count specified")
+		l.Log().Fatalln(err)
 	}

 	// --role
 	roleStr, err := cmd.Flags().GetString("role")
 	if err != nil {
-		log.Errorln("No node role specified")
-		log.Fatalln(err)
+		l.Log().Errorln("No node role specified")
+		l.Log().Fatalln(err)
 	}
 	if _, ok := k3d.NodeRoles[roleStr]; !ok {
-		log.Fatalf("Unknown node role '%s'\n", roleStr)
+		l.Log().Fatalf("Unknown node role '%s'\n", roleStr)
 	}
 	role := k3d.NodeRoles[roleStr]

 	// --image
 	image, err := cmd.Flags().GetString("image")
 	if err != nil {
-		log.Errorln("No image specified")
-		log.Fatalln(err)
+		l.Log().Errorln("No image specified")
+		l.Log().Fatalln(err)
 	}

 	// --cluster
 	clusterName, err := cmd.Flags().GetString("cluster")
 	if err != nil {
-		log.Fatalln(err)
-	}
-	cluster := &k3d.Cluster{
-		Name: clusterName,
+		l.Log().Fatalln(err)
 	}

 	// --memory
 	memory, err := cmd.Flags().GetString("memory")
 	if err != nil {
-		log.Errorln("No memory specified")
-		log.Fatalln(err)
+		l.Log().Errorln("No memory specified")
+		l.Log().Fatalln(err)
 	}
 	if _, err := dockerunits.RAMInBytes(memory); memory != "" && err != nil {
-		log.Errorf("Provided memory limit value is invalid")
+		l.Log().Errorf("Provided memory limit value is invalid")
 	}

 	// --runtime-label
 	runtimeLabelsFlag, err := cmd.Flags().GetStringSlice("runtime-label")
 	if err != nil {
-		log.Errorln("No runtime-label specified")
-		log.Fatalln(err)
+		l.Log().Errorln("No runtime-label specified")
+		l.Log().Fatalln(err)
 	}

 	runtimeLabels := make(map[string]string, len(runtimeLabelsFlag)+1)
 	for _, label := range runtimeLabelsFlag {
 		labelSplitted := strings.Split(label, "=")
 		if len(labelSplitted) != 2 {
-			log.Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
+			l.Log().Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
 		}
 		cliutil.ValidateRuntimeLabelKey(labelSplitted[0])
 		runtimeLabels[labelSplitted[0]] = labelSplitted[1]
@@ -153,19 +161,25 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl
 	// --k3s-node-label
 	k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label")
 	if err != nil {
-		log.Errorln("No k3s-node-label specified")
-		log.Fatalln(err)
+		l.Log().Errorln("No k3s-node-label specified")
+		l.Log().Fatalln(err)
 	}

 	k3sNodeLabels := make(map[string]string, len(k3sNodeLabelsFlag))
 	for _, label := range k3sNodeLabelsFlag {
 		labelSplitted := strings.Split(label, "=")
 		if len(labelSplitted) != 2 {
-			log.Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
+			l.Log().Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
 		}
 		k3sNodeLabels[labelSplitted[0]] = labelSplitted[1]
 	}

+	// --network
+	networks, err := cmd.Flags().GetStringSlice("network")
+	if err != nil {
+		l.Log().Fatalf("failed to get --network string slice flag: %v", err)
+	}
+
 	// generate list of nodes
 	nodes := []*k3d.Node{}
 	for i := 0; i < replicas; i++ {
@@ -177,9 +191,10 @@ func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cl
 			RuntimeLabels: runtimeLabels,
 			Restart:       true,
 			Memory:        memory,
+			Networks:      networks,
 		}
 		nodes = append(nodes, node)
 	}

-	return nodes, cluster
+	return nodes, clusterName
 }
@@ -22,11 +22,11 @@ THE SOFTWARE.
 package node

 import (
-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 )

@@ -52,14 +52,14 @@ func NewCmdNodeDelete() *cobra.Command {
 			nodeDeleteOpts := k3d.NodeDeleteOpts{SkipLBUpdate: flags.All} // do not update LB, if we're deleting all nodes anyway

 			if len(nodes) == 0 {
-				log.Infoln("No nodes found")
+				l.Log().Infoln("No nodes found")
 			} else {
 				for _, node := range nodes {
 					if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, nodeDeleteOpts); err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
 				}
-				log.Infof("Successfully deleted %d node(s)!", len(nodes))
+				l.Log().Infof("Successfully deleted %d node(s)!", len(nodes))
 			}
 		},
 	}
@@ -83,11 +83,11 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string, flags *nodeDeleteFlag
 	// --all
 	if flags.All {
 		if !flags.IncludeRegistries {
-			log.Infoln("Didn't set '--registries', so won't delete registries.")
+			l.Log().Infoln("Didn't set '--registries', so won't delete registries.")
 		}
 		nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		include := k3d.ClusterInternalNodeRoles
 		exclude := []k3d.Role{}
@@ -99,13 +99,13 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string, flags *nodeDeleteFlag
 	}

 	if !flags.All && len(args) < 1 {
-		log.Fatalln("Expecting at least one node name if `--all` is not set")
+		l.Log().Fatalln("Expecting at least one node name if `--all` is not set")
 	}

 	for _, name := range args {
 		node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		nodes = append(nodes, node)
 	}
@@ -23,11 +23,11 @@ package node

 import (
 	"github.com/docker/go-connections/nat"
-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 )

@@ -46,13 +46,13 @@ func NewCmdNodeEdit() *cobra.Command {

 			existingNode, changeset := parseEditNodeCmd(cmd, args)

-			log.Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)
+			l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)

 			if err := client.NodeEdit(cmd.Context(), runtimes.SelectedRuntime, existingNode, changeset); err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}

-			log.Infof("Successfully updated %s", existingNode.Name)
+			l.Log().Infof("Successfully updated %s", existingNode.Name)

 		},
 	}
@@ -71,16 +71,16 @@ func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node)

 	existingNode, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: args[0]})
 	if err != nil {
-		log.Fatalln(err)
+		l.Log().Fatalln(err)
 	}

 	if existingNode == nil {
-		log.Infof("Node %s not found", args[0])
+		l.Log().Infof("Node %s not found", args[0])
 		return nil, nil
 	}

 	if existingNode.Role != k3d.LoadBalancerRole {
-		log.Fatalln("Currently only the loadbalancer can be updated!")
+		l.Log().Fatalln("Currently only the loadbalancer can be updated!")
 	}

 	changeset := &k3d.Node{}
@@ -90,7 +90,7 @@ func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node)
 	 */
 	portFlags, err := cmd.Flags().GetStringArray("port-add")
 	if err != nil {
-		log.Errorln(err)
+		l.Log().Errorln(err)
 		return nil, nil
 	}

@@ -101,7 +101,7 @@ func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node)

 		portmappings, err := nat.ParsePortSpec(flag)
 		if err != nil {
-			log.Fatalf("Failed to parse port spec '%s': %+v", flag, err)
+			l.Log().Fatalf("Failed to parse port spec '%s': %+v", flag, err)
 		}

 		for _, pm := range portmappings {
@@ -26,13 +26,12 @@ import (
 	"strings"

 	"github.com/liggitt/tabwriter"
-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
-
-	log "github.com/sirupsen/logrus"
 )

 type nodeListFlags struct {
@@ -64,14 +63,14 @@ func NewCmdNodeList() *cobra.Command {
 			if len(nodes) == 0 { // Option a) no name specified -> get all nodes
 				found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
 				if err != nil {
-					log.Fatalln(err)
+					l.Log().Fatalln(err)
 				}
 				existingNodes = append(existingNodes, found...)
 			} else { // Option b) cluster name specified -> get specific cluster
 				for _, node := range nodes {
 					found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
 					if err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
 					existingNodes = append(existingNodes, found)
 				}
@@ -22,12 +22,11 @@ THE SOFTWARE.
 package node

 import (
-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
+	"github.com/rancher/k3d/v5/cmd/util"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"

-	k3d "github.com/rancher/k3d/v4/pkg/types"
-
-	log "github.com/sirupsen/logrus"
 )

 // NewCmdNodeStart returns a new cobra command
@@ -42,7 +41,7 @@ func NewCmdNodeStart() *cobra.Command {
 		Run: func(cmd *cobra.Command, args []string) {
 			node := parseStartNodeCmd(cmd, args)
 			if err := runtimes.SelectedRuntime.StartNode(cmd.Context(), node); err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 		},
 	}
@@ -55,7 +54,7 @@ func NewCmdNodeStart() *cobra.Command {
 func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
 	// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
 	if len(args) == 0 || len(args[0]) == 0 {
-		log.Fatalln("No node name given")
+		l.Log().Fatalln("No node name given")
 	}

 	return &k3d.Node{Name: args[0]}
@@ -22,13 +22,12 @@ THE SOFTWARE.
 package node

 import (
-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
 	"github.com/spf13/cobra"

-	k3d "github.com/rancher/k3d/v4/pkg/types"
-
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 )

 // NewCmdNodeStop returns a new cobra command
@@ -43,7 +42,7 @@ func NewCmdNodeStop() *cobra.Command {
 		Run: func(cmd *cobra.Command, args []string) {
 			node := parseStopNodeCmd(cmd, args)
 			if err := runtimes.SelectedRuntime.StopNode(cmd.Context(), node); err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 		},
 	}
@@ -56,7 +55,7 @@ func NewCmdNodeStop() *cobra.Command {
 func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
 	// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
 	if len(args) == 0 || len(args[0]) == 0 {
-		log.Fatalln("No node name given")
+		l.Log().Fatalln("No node name given")
 	}

 	return &k3d.Node{Name: args[0]}
@@ -22,7 +22,7 @@ THE SOFTWARE.
 package registry

 import (
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 	"github.com/spf13/cobra"
 )

@@ -37,18 +37,18 @@ func NewCmdRegistry() *cobra.Command {
 		Long: `Manage registry/registries`,
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := cmd.Help(); err != nil {
-				log.Errorln("Couldn't get help text")
-				log.Fatalln(err)
+				l.Log().Errorln("Couldn't get help text")
+				l.Log().Fatalln(err)
 			}
 		},
 	}

 	// add subcommands
-	cmd.AddCommand(NewCmdRegistryCreate())
-	cmd.AddCommand(NewCmdRegistryStart())
-	cmd.AddCommand(NewCmdRegistryStop())
-	cmd.AddCommand(NewCmdRegistryDelete())
-	cmd.AddCommand(NewCmdRegistryList())
+	cmd.AddCommand(NewCmdRegistryCreate(),
+		NewCmdRegistryStart(),
+		NewCmdRegistryStop(),
+		NewCmdRegistryDelete(),
+		NewCmdRegistryList())

 	// add flags
@@ -24,14 +24,14 @@ package registry
 import (
 	"fmt"
 
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 
-	"github.com/rancher/k3d/v4/pkg/client"
+	"github.com/rancher/k3d/v5/pkg/client"
 
-	cliutil "github.com/rancher/k3d/v4/cmd/util"
+	cliutil "github.com/rancher/k3d/v5/cmd/util"
 	"github.com/spf13/cobra"
 )
 
@@ -75,12 +75,12 @@ func NewCmdRegistryCreate() *cobra.Command {
 			reg, clusters := parseCreateRegistryCmd(cmd, args, flags, ppFlags)
 			regNode, err := client.RegistryRun(cmd.Context(), runtimes.SelectedRuntime, reg)
 			if err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 			if err := client.RegistryConnectClusters(cmd.Context(), runtimes.SelectedRuntime, regNode, clusters); err != nil {
-				log.Errorln(err)
+				l.Log().Errorln(err)
 			}
-			log.Infof("Successfully created registry '%s'", reg.Host)
+			l.Log().Infof("Successfully created registry '%s'", reg.Host)
 			regString := fmt.Sprintf("%s:%s", reg.Host, reg.ExposureOpts.Binding.HostPort)
 			if !flags.NoHelp {
 				fmt.Println(fmt.Sprintf(helptext, regString, regString, regString, regString))
@@ -93,10 +93,10 @@ func NewCmdRegistryCreate() *cobra.Command {
 	// TODO: connecting to clusters requires non-existing config reload functionality in containerd
 	cmd.Flags().StringArrayVarP(&ppFlags.Clusters, "cluster", "c", nil, "[NotReady] Select the cluster(s) that the registry shall connect to.")
 	if err := cmd.RegisterFlagCompletionFunc("cluster", cliutil.ValidArgsAvailableClusters); err != nil {
-		log.Fatalln("Failed to register flag completion for '--cluster'", err)
+		l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
 	}
 	if err := cmd.Flags().MarkHidden("cluster"); err != nil {
-		log.Fatalln("Failed to hide --cluster flag on registry create command")
+		l.Log().Fatalln("Failed to hide --cluster flag on registry create command")
 	}
 
 	cmd.Flags().StringVarP(&flags.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultRegistryImageRepo, k3d.DefaultRegistryImageTag), "Specify image used for the registry")
@@ -125,8 +125,8 @@ func parseCreateRegistryCmd(cmd *cobra.Command, args []string, flags *regCreateF
 	// --port
 	exposePort, err := cliutil.ParsePortExposureSpec(ppFlags.Port, k3d.DefaultRegistryPort)
 	if err != nil {
-		log.Errorln("Failed to parse registry port")
-		log.Fatalln(err)
+		l.Log().Errorln("Failed to parse registry port")
+		l.Log().Fatalln(err)
 	}
 
 	// set the name for the registry node
@@ -22,11 +22,11 @@ THE SOFTWARE.
 package registry
 
 import (
-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 )
 
@@ -51,11 +51,11 @@ func NewCmdRegistryDelete() *cobra.Command {
 			nodes := parseRegistryDeleteCmd(cmd, args, &flags)
 
 			if len(nodes) == 0 {
-				log.Infoln("No registries found")
+				l.Log().Infoln("No registries found")
 			} else {
 				for _, node := range nodes {
 					if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
 				}
 			}
@@ -80,18 +80,18 @@ func parseRegistryDeleteCmd(cmd *cobra.Command, args []string, flags *registryDe
 	if flags.All {
 		nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 	}
 
 	if !flags.All && len(args) < 1 {
-		log.Fatalln("Expecting at least one registry name if `--all` is not set")
+		l.Log().Fatalln("Expecting at least one registry name if `--all` is not set")
 	}
 
 	for _, name := range args {
 		node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
 		if err != nil {
-			log.Fatalln(err)
+			l.Log().Fatalln(err)
 		}
 		nodes = append(nodes, node)
 	}
@@ -26,11 +26,11 @@ import (
 	"strings"
 
 	"github.com/liggitt/tabwriter"
-	"github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/util"
+	"github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 )
 
@@ -64,15 +64,15 @@ func NewCmdRegistryList() *cobra.Command {
 			if len(nodes) == 0 { // Option a) no name specified -> get all registries
 				found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
 				if err != nil {
-					log.Fatalln(err)
+					l.Log().Fatalln(err)
 				}
 				existingNodes = append(existingNodes, found...)
 			} else { // Option b) registry name(s) specified -> get specific registries
 				for _, node := range nodes {
-					log.Tracef("Node %s", node.Name)
+					l.Log().Tracef("Node %s", node.Name)
 					found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
 					if err != nil {
-						log.Fatalln(err)
+						l.Log().Fatalln(err)
 					}
 					existingNodes = append(existingNodes, found)
 				}
cmd/root.go (146 lines changed)
@@ -25,24 +25,24 @@ import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"strings"
 
 	"github.com/spf13/cobra"
 	"gopkg.in/yaml.v2"
 
-	"github.com/rancher/k3d/v4/cmd/cluster"
-	cfg "github.com/rancher/k3d/v4/cmd/config"
-	"github.com/rancher/k3d/v4/cmd/debug"
-	"github.com/rancher/k3d/v4/cmd/image"
-	"github.com/rancher/k3d/v4/cmd/kubeconfig"
-	"github.com/rancher/k3d/v4/cmd/node"
-	"github.com/rancher/k3d/v4/cmd/registry"
-	cliutil "github.com/rancher/k3d/v4/cmd/util"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	"github.com/rancher/k3d/v4/version"
-	log "github.com/sirupsen/logrus"
+	"github.com/rancher/k3d/v5/cmd/cluster"
+	cfg "github.com/rancher/k3d/v5/cmd/config"
+	"github.com/rancher/k3d/v5/cmd/debug"
+	"github.com/rancher/k3d/v5/cmd/image"
+	"github.com/rancher/k3d/v5/cmd/kubeconfig"
+	"github.com/rancher/k3d/v5/cmd/node"
+	"github.com/rancher/k3d/v5/cmd/registry"
+	cliutil "github.com/rancher/k3d/v5/cmd/util"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	"github.com/rancher/k3d/v5/version"
+	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus/hooks/writer"
 )
 
@@ -71,7 +71,7 @@ All Nodes of a k3d cluster are part of the same docker network.`,
 			printVersion()
 		} else {
 			if err := cmd.Usage(); err != nil {
-				log.Fatalln(err)
+				l.Log().Fatalln(err)
 			}
 		}
 	},
@@ -85,40 +85,38 @@ All Nodes of a k3d cluster are part of the same docker network.`,
 	rootCmd.Flags().BoolVar(&flags.version, "version", false, "Show k3d and default k3s version")
 
 	// add subcommands
-	rootCmd.AddCommand(NewCmdCompletion(rootCmd))
-	rootCmd.AddCommand(cluster.NewCmdCluster())
-	rootCmd.AddCommand(kubeconfig.NewCmdKubeconfig())
-	rootCmd.AddCommand(node.NewCmdNode())
-	rootCmd.AddCommand(image.NewCmdImage())
-	rootCmd.AddCommand(cfg.NewCmdConfig())
-	rootCmd.AddCommand(registry.NewCmdRegistry())
-	rootCmd.AddCommand(debug.NewCmdDebug())
-
-	rootCmd.AddCommand(&cobra.Command{
-		Use:   "version",
-		Short: "Show k3d and default k3s version",
-		Long:  "Show k3d and default k3s version",
-		Run: func(cmd *cobra.Command, args []string) {
-			printVersion()
-		},
-	})
-
-	rootCmd.AddCommand(&cobra.Command{
-		Use:   "runtime-info",
-		Short: "Show runtime information",
-		Long:  "Show some information about the runtime environment (e.g. docker info)",
-		Run: func(cmd *cobra.Command, args []string) {
-			info, err := runtimes.SelectedRuntime.Info()
-			if err != nil {
-				log.Fatalln(err)
-			}
-			err = yaml.NewEncoder(os.Stdout).Encode(info)
-			if err != nil {
-				log.Fatalln(err)
-			}
-		},
-		Hidden: true,
-	})
+	rootCmd.AddCommand(NewCmdCompletion(rootCmd),
+		cluster.NewCmdCluster(),
+		kubeconfig.NewCmdKubeconfig(),
+		node.NewCmdNode(),
+		image.NewCmdImage(),
+		cfg.NewCmdConfig(),
+		registry.NewCmdRegistry(),
+		debug.NewCmdDebug(),
+		&cobra.Command{
+			Use:   "version",
+			Short: "Show k3d and default k3s version",
+			Long:  "Show k3d and default k3s version",
+			Run: func(cmd *cobra.Command, args []string) {
+				printVersion()
+			},
+		},
+		&cobra.Command{
+			Use:   "runtime-info",
+			Short: "Show runtime information",
+			Long:  "Show some information about the runtime environment (e.g. docker info)",
+			Run: func(cmd *cobra.Command, args []string) {
+				info, err := runtimes.SelectedRuntime.Info()
+				if err != nil {
+					l.Log().Fatalln(err)
+				}
+				err = yaml.NewEncoder(os.Stdout).Encode(info)
+				if err != nil {
+					l.Log().Fatalln(err)
+				}
+			},
+			Hidden: true,
+		})
 
 	// Init
 	cobra.OnInitialize(initLogging, initRuntime)
@@ -136,58 +134,58 @@ func Execute() {
 		if _, _, err := cmd.Find(parts); err != nil {
 			pluginFound, err := cliutil.HandlePlugin(context.Background(), parts)
 			if err != nil {
-				log.Errorf("Failed to execute plugin '%+v'", parts)
-				log.Fatalln(err)
+				l.Log().Errorf("Failed to execute plugin '%+v'", parts)
+				l.Log().Fatalln(err)
 			} else if pluginFound {
 				os.Exit(0)
 			}
 		}
 	}
 	if err := cmd.Execute(); err != nil {
-		log.Fatalln(err)
+		l.Log().Fatalln(err)
 	}
 }
 
 // initLogging initializes the logger
 func initLogging() {
 	if flags.traceLogging {
-		log.SetLevel(log.TraceLevel)
+		l.Log().SetLevel(logrus.TraceLevel)
 	} else if flags.debugLogging {
-		log.SetLevel(log.DebugLevel)
+		l.Log().SetLevel(logrus.DebugLevel)
 	} else {
 		switch logLevel := strings.ToUpper(os.Getenv("LOG_LEVEL")); logLevel {
 		case "TRACE":
-			log.SetLevel(log.TraceLevel)
+			l.Log().SetLevel(logrus.TraceLevel)
 		case "DEBUG":
-			log.SetLevel(log.DebugLevel)
+			l.Log().SetLevel(logrus.DebugLevel)
 		case "WARN":
-			log.SetLevel(log.WarnLevel)
+			l.Log().SetLevel(logrus.WarnLevel)
 		case "ERROR":
-			log.SetLevel(log.ErrorLevel)
+			l.Log().SetLevel(logrus.ErrorLevel)
 		default:
-			log.SetLevel(log.InfoLevel)
+			l.Log().SetLevel(logrus.InfoLevel)
 		}
 	}
-	log.SetOutput(ioutil.Discard)
-	log.AddHook(&writer.Hook{
+	l.Log().SetOutput(io.Discard)
+	l.Log().AddHook(&writer.Hook{
 		Writer: os.Stderr,
-		LogLevels: []log.Level{
-			log.PanicLevel,
-			log.FatalLevel,
-			log.ErrorLevel,
-			log.WarnLevel,
+		LogLevels: []logrus.Level{
+			logrus.PanicLevel,
+			logrus.FatalLevel,
+			logrus.ErrorLevel,
+			logrus.WarnLevel,
 		},
 	})
-	log.AddHook(&writer.Hook{
+	l.Log().AddHook(&writer.Hook{
 		Writer: os.Stdout,
-		LogLevels: []log.Level{
-			log.InfoLevel,
-			log.DebugLevel,
-			log.TraceLevel,
+		LogLevels: []logrus.Level{
+			logrus.InfoLevel,
+			logrus.DebugLevel,
+			logrus.TraceLevel,
 		},
 	})
 
-	formatter := &log.TextFormatter{
+	formatter := &logrus.TextFormatter{
 		ForceColors: true,
 	}
 
@@ -195,18 +193,18 @@ func initLogging() {
 		formatter.FullTimestamp = true
 	}
 
-	log.SetFormatter(formatter)
+	l.Log().SetFormatter(formatter)
 
 }
 
 func initRuntime() {
 	runtime, err := runtimes.GetRuntime("docker")
 	if err != nil {
-		log.Fatalln(err)
+		l.Log().Fatalln(err)
 	}
 	runtimes.SelectedRuntime = runtime
 	if rtinfo, err := runtime.Info(); err == nil {
-		log.Debugf("Runtime Info:\n%+v", rtinfo)
+		l.Log().Debugf("Runtime Info:\n%+v", rtinfo)
 	}
 }
 
@@ -286,11 +284,11 @@ PowerShell:
 	Run: func(cmd *cobra.Command, args []string) {
 		if completionFunc, ok := completionFunctions[args[0]]; ok {
 			if err := completionFunc(os.Stdout); err != nil {
-				log.Fatalf("Failed to generate completion script for shell '%s'", args[0])
+				l.Log().Fatalf("Failed to generate completion script for shell '%s'", args[0])
 			}
 			return
 		}
-		log.Fatalf("Shell '%s' not supported for completion", args[0])
+		l.Log().Fatalf("Shell '%s' not supported for completion", args[0])
 	},
 }
 return cmd
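The initLogging hunk above keeps a useful pattern intact while porting it to the new logger: the base output is discarded and two writer.Hooks split the streams, so warnings and errors land on stderr while info/debug/trace go to stdout. A standalone sketch of just that mechanism, using plain logrus and independent of k3d:

```go
package main

import (
	"io"
	"os"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/writer"
)

func main() {
	log := logrus.New()
	// Discard the default stream; the hooks below do all the writing.
	log.SetOutput(io.Discard)

	// Panic/Fatal/Error/Warn -> stderr
	log.AddHook(&writer.Hook{
		Writer:    os.Stderr,
		LogLevels: []logrus.Level{logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, logrus.WarnLevel},
	})
	// Info/Debug/Trace -> stdout
	log.AddHook(&writer.Hook{
		Writer:    os.Stdout,
		LogLevels: []logrus.Level{logrus.InfoLevel, logrus.DebugLevel, logrus.TraceLevel},
	})

	log.Infoln("this goes to stdout")
	log.Errorln("this goes to stderr")
}
```

Splitting the streams this way lets scripts pipe k3d's informational output while still seeing errors on the terminal.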
@@ -25,10 +25,10 @@ import (
 	"context"
 	"strings"
 
-	k3dcluster "github.com/rancher/k3d/v4/pkg/client"
-	"github.com/rancher/k3d/v4/pkg/runtimes"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	k3dcluster "github.com/rancher/k3d/v5/pkg/client"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"github.com/spf13/cobra"
 )
 
@@ -39,7 +39,7 @@ func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete st
 	var clusters []*k3d.Cluster
 	clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
 	if err != nil {
-		log.Errorln("Failed to get list of clusters for shell completion")
+		l.Log().Errorln("Failed to get list of clusters for shell completion")
 		return nil, cobra.ShellCompDirectiveError
 	}
 
@@ -64,7 +64,7 @@ func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete strin
 	var nodes []*k3d.Node
 	nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
 	if err != nil {
-		log.Errorln("Failed to get list of nodes for shell completion")
+		l.Log().Errorln("Failed to get list of nodes for shell completion")
 		return nil, cobra.ShellCompDirectiveError
 	}
 
@@ -89,7 +89,7 @@ func ValidArgsAvailableRegistries(cmd *cobra.Command, args []string, toComplete
 	var nodes []*k3d.Node
 	nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
 	if err != nil {
-		log.Errorln("Failed to get list of nodes for shell completion")
+		l.Log().Errorln("Failed to get list of nodes for shell completion")
 		return nil, cobra.ShellCompDirectiveError
 	}
 
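The three ValidArgsAvailable* helpers above all share cobra's completion-function signature. A minimal, self-contained sketch of how such a function plugs into a command — the node names are made up for illustration; k3d derives them from NodeList at completion time:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "start NODE",
		// Same signature as ValidArgsAvailableNodes: the args typed so far plus
		// the partial word, returning candidates and a completion directive.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"k3d-demo-server-0", "k3d-demo-agent-0"}, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	_ = cmd.Execute()
}
```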
cmd/util/config/config.go (new file, 97 lines)
@@ -0,0 +1,97 @@
+/*
+Copyright © 2020-2021 The k3d Author(s)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+package config
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/rancher/k3d/v5/pkg/config"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+	"gopkg.in/yaml.v2"
+)
+
+func InitViperWithConfigFile(cfgViper *viper.Viper, configFile string) error {
+
+	// viper for the general config (file, env and non pre-processed flags)
+	cfgViper.SetEnvPrefix("K3D")
+	cfgViper.AutomaticEnv()
+
+	cfgViper.SetConfigType("yaml")
+
+	// Set config file, if specified
+	if configFile != "" {
+
+		if _, err := os.Stat(configFile); err != nil {
+			l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
+		}
+
+		// create temporary file to expand environment variables in the config without writing that back to the original file
+		// we're doing it here, because this happens just before absolutely all other processing
+		tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile)))
+		if err != nil {
+			l.Log().Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
+		}
+		defer tmpfile.Close()
+
+		originalcontent, err := os.ReadFile(configFile)
+		if err != nil {
+			l.Log().Fatalf("error reading config file %s: %v", configFile, err)
+		}
+		expandedcontent := os.ExpandEnv(string(originalcontent))
+		if _, err := tmpfile.WriteString(expandedcontent); err != nil {
+			l.Log().Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
+		}
+
+		// use temp file with expanded variables
+		cfgViper.SetConfigFile(tmpfile.Name())
+
+		// try to read config into memory (viper map structure)
+		if err := cfgViper.ReadInConfig(); err != nil {
+			if _, ok := err.(viper.ConfigFileNotFoundError); ok {
+				l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
+			}
+			// config file found but some other error happened
+			l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
+		}
+
+		schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
+		if err != nil {
+			l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
+		}
+
+		if err := config.ValidateSchemaFile(tmpfile.Name(), schema); err != nil {
+			l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
+		}
+
+		l.Log().Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
+	}
+	if l.Log().GetLevel() >= logrus.DebugLevel {
+		c, _ := yaml.Marshal(cfgViper.AllSettings())
+		l.Log().Debugf("Configuration:\n%s", c)
+	}
+	return nil
+}
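The notable part of this new file is the variable-expansion step: the config file is copied through os.ExpandEnv into a temp file before viper and the schema validator ever see it, so `$VAR`/`${VAR}` references resolve without rewriting the user's original file. A tiny standalone demo of that expansion step (the YAML snippet is illustrative, not a complete k3d config):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("SERVERS", "3")

	raw := "kind: Simple\nservers: $SERVERS\n"
	// ExpandEnv returns a new string; the original stays untouched, which is
	// exactly why config.go writes the result to a temp file instead of back.
	fmt.Print(os.ExpandEnv(raw))
	// Output:
	// kind: Simple
	// servers: 3
}
```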
@@ -25,7 +25,7 @@ import (
 	"fmt"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 )
 
 // SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
@@ -50,10 +50,10 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
 			// Case 1.1: Escaped backslash
 			if strings.HasSuffix(it, "\\\\") {
 				it = strings.TrimSuffix(it, "\\")
-				log.Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
+				l.Log().Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
 			} else {
 				// Case 1.2: Unescaped backslash -> Escaping the '@' -> remove suffix and append it to buffer, followed by the escaped @ sign
-				log.Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
+				l.Log().Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
 				buffer += strings.TrimSuffix(it, "\\") + "@"
 				continue
 			}
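The two escape cases above are subtle: a part ending in `\\` keeps a literal backslash (and the `@` still splits), while a part ending in a single `\` glues the `@` into the value. A toy re-implementation of just that split loop, under the assumption that it mirrors SplitFiltersFromFlag's intent (error handling and filter validation omitted):

```go
package main

import (
	"fmt"
	"strings"
)

func split(flag string) (string, []string) {
	out := []string{}
	buffer := ""
	for _, it := range strings.Split(flag, "@") {
		if strings.HasSuffix(it, "\\") && !strings.HasSuffix(it, "\\\\") {
			// single trailing backslash: it escapes the '@', glue to next part
			buffer += strings.TrimSuffix(it, "\\") + "@"
			continue
		}
		// double trailing backslash: keep one backslash, the '@' splits normally
		out = append(out, buffer+strings.TrimSuffix(it, "\\"))
		buffer = ""
	}
	return out[0], out[1:]
}

func main() {
	v, f := split("/src:/dst@agent[0]")
	fmt.Println(v, f) // /src:/dst [agent[0]]

	v, f = split("user\\@host:/dst@agent[*]")
	fmt.Println(v, f) // user@host:/dst [agent[*]]
}
```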
@@ -29,8 +29,8 @@ import (
 	"strings"
 
 	"github.com/liggitt/tabwriter"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 	"gopkg.in/yaml.v2"
 )
 
@@ -55,7 +55,7 @@ func PrintNodes(nodes []*k3d.Node, outputFormat string, headers *[]string, nodeP
 	if headers != nil {
 		_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(*headers, "\t"))
 		if err != nil {
-			log.Fatalln("Failed to print headers")
+			l.Log().Fatalln("Failed to print headers")
 		}
 	}
 }
@@ -28,7 +28,7 @@ import (
 	"os/exec"
 	"strings"
 
-	k3d "github.com/rancher/k3d/v4/pkg/types"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
 )
 
 // HandlePlugin takes care of finding and executing a plugin based on the longest prefix
@@ -28,9 +28,9 @@ import (
 	"strconv"
 
 	"github.com/docker/go-connections/nat"
-	k3d "github.com/rancher/k3d/v4/pkg/types"
-	"github.com/rancher/k3d/v4/pkg/util"
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
+	k3d "github.com/rancher/k3d/v5/pkg/types"
+	"github.com/rancher/k3d/v5/pkg/util"
 )
 
 var apiPortRegexp = regexp.MustCompile(`^(?P<hostref>(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|(?P<hostname>\S+):)?(?P<port>(\d{1,5}|random))$`)
@@ -55,7 +55,7 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO
 
 	// check if there's a host reference
 	if submatches["hostname"] != "" {
-		log.Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
+		l.Log().Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
 		addrs, err := net.LookupHost(submatches["hostname"])
 		if err != nil {
 			return nil, fmt.Errorf("Failed to lookup host '%s' specified for Port Exposure: %+v", submatches["hostname"], err)
@@ -77,15 +77,15 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO
 
 	// port: get a free one if there's none defined or set to random
 	if submatches["port"] == "" || submatches["port"] == "random" {
-		log.Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
+		l.Log().Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
 		freePort, err := GetFreePort()
 		if err != nil || freePort == 0 {
-			log.Warnf("Failed to get random free port: %+v", err)
-			log.Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
+			l.Log().Warnf("Failed to get random free port: %+v", err)
+			l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
 			submatches["port"] = internalPort
 		} else {
 			submatches["port"] = strconv.Itoa(freePort)
-			log.Debugf("Got free port for Port Exposure: '%d'", freePort)
+			l.Log().Debugf("Got free port for Port Exposure: '%d'", freePort)
 		}
 	}
 
@@ -93,7 +93,7 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO
 
 	portMapping, err := nat.ParsePortSpec(realPortString)
 	if err != nil {
-		return nil, fmt.Errorf("Failed to parse port spec for Port Exposure '%s': %+v", realPortString, err)
+		return nil, fmt.Errorf("failed to parse port spec for Port Exposure '%s': %+v", realPortString, err)
 	}
 
 	api.Port = portMapping[0].Port // there can be only one due to our regexp
@@ -112,14 +112,12 @@ func ValidatePortMap(portmap string) (string, error) {
 func GetFreePort() (int, error) {
 	tcpAddress, err := net.ResolveTCPAddr("tcp", "localhost:0")
 	if err != nil {
-		log.Errorln("Failed to resolve address")
-		return 0, err
+		return 0, fmt.Errorf("failed to resolve address 'localhost:0': %w", err)
 	}
 
 	tcpListener, err := net.ListenTCP("tcp", tcpAddress)
 	if err != nil {
-		log.Errorln("Failed to create TCP Listener")
-		return 0, err
+		return 0, fmt.Errorf("failed to create tcp listener: %w", err)
 	}
 	defer tcpListener.Close()
 
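GetFreePort above now wraps its errors instead of logging them, but the underlying trick is unchanged: asking the kernel for port 0 makes it assign a free ephemeral port. A standalone sketch of the same idea:

```go
package main

import (
	"fmt"
	"net"
)

// freePort asks the OS for an ephemeral port by listening on port 0,
// then releases it and reports the number that was assigned.
func freePort() (int, error) {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		return 0, fmt.Errorf("failed to resolve address 'localhost:0': %w", err)
	}
	ln, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return 0, fmt.Errorf("failed to create tcp listener: %w", err)
	}
	defer ln.Close()
	return ln.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		panic(err)
	}
	fmt.Println("free port:", port)
	// Note: the port is only reserved while the listener is open, so there is
	// an inherent race between returning it and the caller binding it.
}
```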
@@ -24,12 +24,12 @@ package util
 import (
 	"strings"
 
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 )
 
 // validateRuntimeLabelKey validates a given label key is not reserved for internal k3d usage
 func ValidateRuntimeLabelKey(labelKey string) {
 	if strings.HasPrefix(labelKey, "k3s.") || strings.HasPrefix(labelKey, "k3d.") || labelKey == "app" {
-		log.Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
+		l.Log().Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
 	}
 }
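The reservation rule above is simple prefix matching; a quick standalone check of which keys would be rejected (same predicate, extracted here for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// reserved mirrors the condition in ValidateRuntimeLabelKey.
func reserved(key string) bool {
	return strings.HasPrefix(key, "k3s.") || strings.HasPrefix(key, "k3d.") || key == "app"
}

func main() {
	fmt.Println(reserved("k3d.cluster"))  // true  -> rejected (fatal in k3d)
	fmt.Println(reserved("app"))          // true  -> rejected
	fmt.Println(reserved("my.org/owner")) // false -> allowed
}
```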
@@ -27,9 +27,9 @@ import (
 	rt "runtime"
 	"strings"
 
-	"github.com/rancher/k3d/v4/pkg/runtimes"
+	"github.com/rancher/k3d/v5/pkg/runtimes"
 
-	log "github.com/sirupsen/logrus"
+	l "github.com/rancher/k3d/v5/pkg/logger"
 )
 
 // ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
@@ -81,7 +81,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
 	}
 	if !isNamedVolume {
 		if _, err := os.Stat(src); err != nil {
-			log.Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
+			l.Log().Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
 		}
 	}
 }
@@ -98,7 +98,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
 func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
 	volumeName, err := runtime.GetVolume(volumeName)
 	if err != nil {
-		return err
+		return fmt.Errorf("Failed to verify named volume: %w", err)
 	}
 	if volumeName == "" {
 		return fmt.Errorf("Failed to find named volume '%s'", volumeName)
dind-manifest.tmpl (new file, 27 lines)
@@ -0,0 +1,27 @@
+image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}-dind
+{{#if build.tags}}
+tags:
+{{#each build.tags}}
+  - {{this}}
+{{/each}}
+{{/if}}
+manifests:
+  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-amd64
+    platform:
+      architecture: amd64
+      os: linux
+  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm64
+    platform:
+      variant: v8
+      architecture: arm64
+      os: linux
+  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
+    platform:
+      variant: v7
+      architecture: arm
+      os: linux
+  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
+    platform:
+      variant: v6
+      architecture: arm
+      os: linux
@@ -3,20 +3,11 @@ module github.com/rancher/k3d/docgen
 go 1.16
 
 require (
-	github.com/Microsoft/go-winio v0.4.17 // indirect
-	github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb // indirect
-	github.com/containerd/containerd v1.5.0-rc.1 // indirect
-	github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/rancher/k3d/v4 v4.4.7-0.20210709062205-c5f7884f7870
+	github.com/rancher/k3d/v5 v5.0.0-00010101000000-000000000000
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spf13/cobra v1.2.1
-	golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 // indirect
-	golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 // indirect
-	golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 // indirect
-	k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.1.1 // indirect
 )
 
-replace github.com/rancher/k3d/v4 => /PATH/TO/YOUR/REPO/DIRECTORY
+replace github.com/rancher/k3d/v5 => /PATH/TO/YOUR/REPO/DIRECTORY
docgen/go.sum (109 lines changed)
@@ -44,13 +44,15 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -69,8 +71,9 @@ github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEY
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.18 h1:cYnKADiM1869gvBpos3YCteeT6sZLB48lB5dmMMs8Tg=
github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -142,16 +145,18 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb h1:cq9suWES/pQHVg1N4u8ltT30HWScFmcAz4sB/wJyp/I=
github.com/containerd/cgroups v0.0.0-20210414074453-680c246289fb/go.mod h1:sgGgnAnNasYdJ1ypnikP2SO7SM0Lfgkgwk3TUc9bDO4=
github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -165,37 +170,41 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.0-rc.1 h1:7n80DQm69wXXjLGQo8sytMPC9Z+kG6B4s95hfbFLiXQ=
github.com/containerd/containerd v1.5.0-rc.1/go.mod h1:kAwhYasTYKvQWPnWf8CoRDu3vikb17YocPLvHMQhBn4=
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.5 h1:q1gxsZsGZ8ddVe98yO6pR21b5xQSMiR61lD0W96pgQo=
github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5 h1:k6Dn7shF+i1q4utvCyW4+o9REsCMAeRyORM5IhXMCnw=
github.com/containerd/continuity v0.0.0-20210315143101-93e15499afd5/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1-0.20210412181126-0bed51b9522c/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -204,10 +213,12 @@ github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8h
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
@@ -243,6 +254,7 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -252,17 +264,17 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v20.10.7+incompatible h1:pv/3NqibQKphWZiAskMzdz8w0PRbtTaEB+f6NwdU7Is=
github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.8+incompatible h1:/zO/6y9IOpcehE49yMRTV9ea0nBpb8OeqSskXLNfH1E=
github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -295,9 +307,11 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
@@ -419,6 +433,7 @@ github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunE
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -455,9 +470,12 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -606,6 +624,7 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
@@ -646,12 +665,14 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -804,6 +825,7 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y
github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -983,6 +1005,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -994,12 +1017,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 h1:4qWs8cYYH6PoEFy4dfhDFgoMGkwAcETd+MmPdCPMzUc=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1077,6 +1099,7 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1098,15 +1121,16 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -1129,8 +1153,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1263,6 +1287,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
@ -1351,6 +1376,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
@ -1365,38 +1391,44 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
inet.af/netaddr v0.0.0-20210421205553-78c777480f22 h1:TX8hopxzHycFVkIsvu6DSpCWUCqDqOvyyPj/5IK1fUQ=
|
||||
inet.af/netaddr v0.0.0-20210421205553-78c777480f22/go.mod h1:z0nx+Dh+7N7CC8V5ayHtHGpZpxLQZZxkIaaz6HN65Ls=
|
||||
inet.af/netaddr v0.0.0-20210903134321-85fa6c94624e h1:tvgqez5ZQoBBiBAGNU/fmJy247yB/7++kcLOEoMYup0=
|
||||
inet.af/netaddr v0.0.0-20210903134321-85fa6c94624e/go.mod h1:z0nx+Dh+7N7CC8V5ayHtHGpZpxLQZZxkIaaz6HN65Ls=
|
||||
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
||||
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
|
||||
k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y=
|
||||
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
|
||||
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
||||
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
|
||||
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
|
||||
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA=
|
||||
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
|
||||
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
|
||||
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
|
||||
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
|
||||
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
|
||||
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
|
||||
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
|
||||
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
|
||||
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
|
||||
k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag=
|
||||
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
|
||||
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
|
||||
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
|
||||
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
|
||||
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
|
||||
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
|
||||
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
|
||||
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
|
||||
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
||||
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
||||
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
|
||||
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
|
||||
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
|
||||
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
|
||||
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 h1:u5rPykqiCpL+LBfjRkXvnK71gOgIdmq3eHUEkPrbeTI=
|
||||
k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
|
||||
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
|
||||
mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
|
||||
@ -1404,10 +1436,11 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.1 h1:nYqY2A6oy37sKLYuSBXuQhbj4JVclzJK13BOIvJG5XU=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
@ -1,9 +1,8 @@
package main

import (
    log "github.com/sirupsen/logrus"

    "github.com/rancher/k3d/v4/cmd"
    "github.com/rancher/k3d/v5/cmd"
    l "github.com/rancher/k3d/v5/pkg/logger"
    "github.com/spf13/cobra/doc"
)

@ -12,6 +11,6 @@ func main() {
    k3d.DisableAutoGenTag = true

    if err := doc.GenMarkdownTree(k3d, "../docs/usage/commands"); err != nil {
        log.Fatalln(err)
        l.Log().Fatalln(err)
    }
}
@ -16,3 +16,7 @@ go mod tidy
go mod vendor

go run ./main.go

sed -i "s%$REPO_DIR%$REPLACE_PLACEHOLDER%" "$CURR_DIR/go.mod"

rm -r "$CURR_DIR/vendor"
@ -1,6 +1,6 @@
nav:
  - index.md
  - usage
  - internals
  - design
  - faq
collapse: false
collapse: false
@ -1,5 +1,5 @@
title: Internals
title: Design
nav:
  - defaults.md
  - project.md
  - defaults.md
  - networking.md
60
docs/design/defaults.md
Normal file
@ -0,0 +1,60 @@
# Defaults

## k3d reserved settings

When you create a K3s cluster in Docker using k3d, we make use of some K3s configuration options, making them "reserved" for k3d.
This means that overriding those options with your own values may break the cluster setup.

### Environment Variables

The following K3s environment variables are used to configure the cluster:

| Variable | K3d Default | Configurable? |
|----------|-------------|---------------|
| `K3S_URL` | `https://$CLUSTERNAME-server-0:6443` | no |
| `K3S_TOKEN` | random | yes (`--token`) |
| `K3S_KUBECONFIG_OUTPUT` | `/output/kubeconfig.yaml` | no |
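The token is the only one of these variables meant to be overridden; a minimal sketch (the cluster name `mycluster` is illustrative):

```bash
# K3S_TOKEN is the only reserved variable that is configurable, via --token
k3d cluster create mycluster --token mySuperSecretToken
```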
## k3d Loadbalancer

By default, k3d creates an Nginx loadbalancer alongside each cluster it creates to handle port-forwarding.
The loadbalancer can be partly configured using k3d-defined settings.

| Nginx setting | k3d default | k3d setting |
|-------------|-------------|-------------|
| `proxy_timeout` (default for all server stanzas) | `600` (s) | `settings.defaultProxyTimeout` |
| `worker_connections` | `1024` | `settings.workerConnections` |

### Overrides

- Example via CLI: `k3d cluster create --lb-config-override settings.defaultProxyTimeout=900`
- Example via Config File:

```yaml
# ... truncated ...
k3d:
  loadbalancer:
    configOverrides:
      - settings.workerConnections=2048
```

## Multiple server nodes

- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
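A short sketch of how this plays out (the cluster name is illustrative, and the `--servers` flag is an assumption based on the multi-server guide further below):

```bash
# server-0 is started with --cluster-init; the other two servers join it
k3d cluster create multiserver --servers 3
```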
## API-Ports

- by default, we expose the API-Port (`6443`) by forwarding traffic from the default server loadbalancer (nginx container) to the server node(s)
- port `6443` of the loadbalancer is then mapped to a specific (`--api-port` flag) or a random (default) port on the host system
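For instance, to pin the host-side port instead of letting k3d pick a random one (a sketch; port `6445` mirrors the example in the config file docs below):

```bash
k3d cluster create --api-port 6445
```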
## Kubeconfig

- if `--kubeconfig-update-default` is set, we use the default loading rules to get the default kubeconfig:
  - First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
  - Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
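Both kubeconfig flags are documented below as defaulting to true, so the common override is opting out (a minimal sketch; per the demo recording, disabling the update also disables the context switch):

```bash
# don't touch the default kubeconfig for this cluster
k3d cluster create demo --kubeconfig-update-default=false
```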
## Networking

- [by default, k3d creates a new (docker) network for every cluster](./networking)
@ -1,3 +1,4 @@
title: FAQ
nav:
  - faq.md
  - faq.md
collapse: true
@ -1,4 +1,4 @@
# FAQ / Nice to know
# FAQ

## Issues with BTRFS

@ -28,8 +28,8 @@

```bash
k3d cluster create \
  --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' \
  --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'
  --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
  --k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*'
```

## Restarting a multi-server cluster or the initializing server node fails
@ -44,7 +44,7 @@

- The Problem: Passing a feature flag to the Kubernetes API Server running inside k3s.
- Example: you want to enable the EphemeralContainers feature flag in Kubernetes
- Solution: `#!bash k3d cluster create --k3s-server-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=true'`
- Solution: `#!bash k3d cluster create --k3s-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=true@server:*'`
- **Note**: Be aware of where the flags require dashes (`--`) and where not.
  - the k3s flag (`--kube-apiserver-arg`) has the dashes
  - the kube-apiserver flag `feature-gates` doesn't have them (k3s adds them internally)
@ -53,10 +53,10 @@

```bash
k3d cluster create k3d-one \
  --k3s-server-arg --cluster-cidr="10.118.0.0/17" \
  --k3s-server-arg --service-cidr="10.118.128.0/17" \
  --k3s-server-arg --disable=servicelb \
  --k3s-server-arg --disable=traefik \
  --k3s-arg "--cluster-cidr=10.118.0.0/17@server:*" \
  --k3s-arg "--service-cidr=10.118.128.0/17@server:*" \
  --k3s-arg "--disable=servicelb@server:*" \
  --k3s-arg "--disable=traefik@server:*" \
  --verbose
```

@ -105,8 +105,8 @@ Some can be fixed by passing the `HTTP_PROXY` environment variables to k3d, some

```bash
k3d cluster create \
  --k3s-server-arg "--kube-proxy-arg=conntrack-max-per-core=0" \
  --k3s-agent-arg "--kube-proxy-arg=conntrack-max-per-core=0" \
  --k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@server:*" \
  --k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@agent:*" \
  --image rancher/k3s:v1.20.6-k3s
```

@ -2,14 +2,16 @@

**This page is targeting k3d v4.0.0 and newer!**
## What is k3d?

k3d is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s) (Rancher Lab's minimal Kubernetes distribution) in docker.

k3d makes it very easy to create single- and multi-node [k3s](https://github.com/rancher/k3s) clusters in docker, e.g. for local development on Kubernetes.

**Note:** k3d is a **community-driven project** that is supported by Rancher (SUSE), but it's not an official Rancher (SUSE) project.

??? Tip "View a quick demo"
    <asciinema-player src="/static/asciicast/20200715_k3d.01.cast" cols=200 rows=32></asciinema-player>
    <asciinema-player src="/static/asciicast/20210917_k3d_v5.0.0_01.cast" cols=200 rows=32></asciinema-player>

## Learning

@ -25,7 +27,8 @@ k3d makes it very easy to create single- and multi-node [k3s](https://github.com

## Requirements

- [docker](https://docs.docker.com/install/)
- [**docker**](https://docs.docker.com/install/) to be able to use k3d at all
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the Kubernetes cluster

## Releases

@ -51,8 +54,8 @@ You have several options there:

Use the install script to grab a specific release (via `TAG` environment variable):

- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`

### Other Installers

@ -78,7 +81,7 @@ Use the install script to grab a specific release (via `TAG` environment variabl

- [asdf](https://asdf-vm.com): `asdf plugin-add k3d && asdf install k3d latest`

*Note*: `asdf plugin-add k3d`, then `asdf install k3d <tag>` with `<tag> = latest` or `4.x.x` for a specific version (maintained by [spencergilbert/asdf-k3d](https://github.com/spencergilbert/asdf-k3d))
*Note*: `asdf plugin-add k3d`, then `asdf install k3d <tag>` with `<tag> = latest` or `5.x.x` for a specific version (maintained by [spencergilbert/asdf-k3d](https://github.com/spencergilbert/asdf-k3d))

- Others
  - install via go: `#!bash go install github.com/rancher/k3d@latest` (**Note**: this will give you unreleased/bleeding-edge changes)
@ -1,22 +0,0 @@
# Defaults

## Multiple server nodes

- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`

## API-Ports

- by default, we expose the API-Port (`6443`) by forwarding traffic from the default server loadbalancer (nginx container) to the server node(s)
- port `6443` of the loadbalancer is then mapped to a specific (`--api-port` flag) or a random (default) port on the host system

## Kubeconfig

- if `--kubeconfig-update-default` is set, we use the default loading rules to get the default kubeconfig:
  - First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
  - Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)

## Networking

- [by default, k3d creates a new (docker) network for every cluster](./networking)
@ -1,7 +1,8 @@
mkdocs
mkdocs-material
pymdown-extensions
mkdocs-git-revision-date-localized-plugin
mkdocs-awesome-pages-plugin
mdx_truly_sane_lists
mkdocs-include-markdown-plugin # https://github.com/mondeja/mkdocs-include-markdown-plugin
mkdocs==1.2.2
mkdocs-material==7.2.6
pymdown-extensions==8.2
mkdocs-git-revision-date-localized-plugin==0.9.3
mkdocs-awesome-pages-plugin==2.5.0
mdx_truly_sane_lists==1.2 # https://github.com/radude/mdx_truly_sane_lists
mkdocs-include-markdown-plugin==3.2.2 # https://github.com/mondeja/mkdocs-include-markdown-plugin
mike==1.1.0 # versioned docs: https://github.com/jimporter/mike
162
docs/static/asciicast/20210917_k3d_v5.0.0_01.cast
vendored
Normal file
@ -0,0 +1,162 @@
{"version": 2, "width": 213, "height": 45, "timestamp": 1631908903, "env": {"SHELL": "bash", "TERM": "xterm-256color"}}
[... 161 asciinema event lines omitted: a terminal recording in which `k3d version` reports v5.0.0, followed by `k3d cluster create --agents 3 demo`, `k3d cluster ls`, `k3d node ls`, `kubectl get nodes`, and `kubectl get pods -A` against the new cluster ...]
18
docs/static/css/extra.css
vendored
@ -23,10 +23,28 @@
  position: relative;
}

/* This is equal to light mode */
[data-md-color-primary=black] .md-tabs {

  /* Set color of the tab bar */
  background-color: #0DCEFF;
}

/* Dark Mode */
[data-md-color-scheme="slate"] .md-header {
  /* keep black background of title bar (header) */
  background-color: black;
}

/* Tab Bar */
.md-tabs {
  color: black;
}

.md-tabs__item {
  font-weight: bolder;
}

.md-tabs__link--active {
  text-decoration: underline;
}

@ -1,7 +1,9 @@
title: Usage
title: Guides
nav:
  - commands
  - configfile.md
  - kubeconfig.md
  - multiserver.md
  - guides
  - registries.md
  - exposing_services.md
  - advanced
  - commands
4
docs/usage/advanced/.pages
Normal file
@ -0,0 +1,4 @@
title: Advanced Guides
nav:
  - calico.md
  - cuda.md
@ -20,14 +20,14 @@ Or you can directly use this [calico.yaml](calico.yaml) manifest

On the k3s cluster creation:

- add the flag `--flannel-backend=none`. For this, on k3d you need to forward this flag to k3s with the option `--k3s-server-arg`.
- add the flag `--flannel-backend=none`. For this, on k3d you need to forward this flag to k3s with the option `--k3s-arg`.
- mount (`--volume`) the calico descriptor in the auto-deploy manifest directory of k3s, `/var/lib/rancher/k3s/server/manifests/`

So, when you are at the root of the k3d repository, the cluster creation command is:

```bash
k3d cluster create "${clustername}" \
  --k3s-server-arg '--flannel-backend=none' \
  --k3s-arg '--flannel-backend=none@server:*' \
  --volume "$(pwd)/docs/usage/guides/calico.yaml:/var/lib/rancher/k3s/server/manifests/calico.yaml"
```

@ -16,8 +16,7 @@ k3d
  -e, --env                      # add environment variables to the nodes (quoted string, format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
  --gpus                         # [from docker CLI] add GPU devices to the node containers (string, e.g. 'all')
  -i, --image                    # specify which k3s image should be used for the nodes (string, default: 'docker.io/rancher/k3s:v1.20.0-k3s2', tag changes per build)
  --k3s-agent-arg                # add additional arguments to the k3s agent (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
  --k3s-server-arg               # add additional arguments to the k3s server (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
  --k3s-arg                      # add additional arguments to the k3s server/agent (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help & https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
  --kubeconfig-switch-context    # (implies --kubeconfig-update-default) automatically sets the current-context of your default kubeconfig to the new cluster's context (default: true)
  --kubeconfig-update-default    # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true') (default: true)
  -l, --label                    # add (docker) labels to the node containers (format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
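The key change above is the consolidated `--k3s-arg` flag, which scopes a K3s argument to nodes via a nodefilter suffix; a quick sketch of the old-vs-new syntax (the `--disable=traefik` example mirrors the FAQ above):

```bash
# v4: --k3s-server-arg '--disable=traefik'
# v5: one flag plus a nodefilter suffix
k3d cluster create --k3s-arg "--disable=traefik@server:*"
```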
@ -35,15 +35,16 @@ k3d cluster create NAME [flags]
  - Example: `k3d cluster create --agents 2 --k3s-node-label "my.label@agent:0,1" --k3s-node-label "other.label=somevalue@server:0"`
--kubeconfig-switch-context    Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true)
--kubeconfig-update-default    Directly update the default kubeconfig with the new cluster's context (default true)
--lb-config-override strings   Use dotted YAML path syntax to override nginx loadbalancer settings
--network string               Join an existing network
--no-hostip                    Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS
--no-image-volume              Disable the creation of a volume for importing images
--no-lb                        Disable the creation of a LoadBalancer in front of the server nodes
--no-rollback                  Disable the automatic rollback actions, if anything goes wrong
-p, --port [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]   Map ports from the node containers (via the serverlb) to the host (Format: [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER])
  - Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent:1`
--registry-config string       Specify path to an extra registries.yaml file
--registry-create              Create a k3d-managed registry and connect it to the cluster
--registry-create NAME[:HOST][:HOSTPORT]   Create a k3d-managed registry and connect it to the cluster (Format: NAME[:HOST][:HOSTPORT])
  - Example: `k3d cluster create --registry-create mycluster-registry:0.0.0.0:5432`
--registry-use stringArray     Connect to one or more k3d-managed registries running locally
--runtime-label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]   Add label to container runtime (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]])
  - Example: `k3d cluster create --agents 2 --runtime-label "my.label@agent:0,1" --runtime-label "other.label=somevalue@server:0"`
@ -13,8 +13,9 @@ k3d cluster delete [NAME [NAME ...] | --all] [flags]
### Options

```
  -a, --all    Delete all existing clusters
  -h, --help   help for delete
  -a, --all             Delete all existing clusters
  -c, --config string   Path of a config file to use
  -h, --help            help for delete
```

### Options inherited from parent commands

@ -13,15 +13,17 @@ k3d node create NAME [flags]
### Options

```
  -c, --cluster string           Select the cluster that the node shall connect to. (default "k3s-default")
  -c, --cluster string           Cluster URL or k3d cluster name to connect to. (default "k3s-default")
  -h, --help                     help for create
  -i, --image string             Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.20.0-k3s2")
  -i, --image string             Specify k3s image used for the node(s) (default "docker.io/rancher/k3s:v1.21.4-k3s2")
      --k3s-node-label strings   Specify k3s node labels in format "foo=bar"
      --memory string            Memory limit imposed on the node [From docker]
  -n, --network strings          Add node to (another) runtime network
      --replicas int             Number of replicas of this node specification. (default 1)
      --role string              Specify node role [server, agent] (default "agent")
      --runtime-label strings    Specify container runtime labels in format "foo=bar"
      --timeout duration         Maximum waiting time for '--wait' before canceling/returning.
  -t, --token string             Override cluster token (required when connecting to an external cluster)
      --wait                     Wait for the node(s) to be ready before returning. (default true)
```

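Putting these options together, adding a server node to a running cluster looks like this (the same command appears in the multi-server guide below):

```bash
# join a new server node to the existing cluster "multiserver"
k3d node create newserver --cluster multiserver --role server
```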
@ -1,10 +1,9 @@
# Config File
# Using Config Files

The config file feature is **available as of k3d v4.0.0**

## Introduction

As of k3d v4.0.0, released in January 2021, k3d ships with configuration file support for the `k3d cluster create` command.
This allows you to define all the things that you previously defined with CLI flags in a nice and tidy YAML file (as a Kubernetes user, we know you love it ;) ).

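In short, you point `k3d cluster create` at the file instead of repeating flags; a minimal sketch (the path is illustrative, and the `--config` flag is assumed from the `-c, --config` option shown in the command docs above):

```bash
# create a cluster entirely from a YAML config file
k3d cluster create --config /home/me/my-awesome-config.yaml
```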
!!! info "Syntax & Semantics"
    The options defined in the config file are not 100% the same as the CLI flags.
    This concerns naming and style/usage/structure, e.g.
@ -62,6 +61,7 @@ kubeAPI: # same as `--api-port myhost.my.domain:6445` (where the name would reso
  hostPort: "6445" # where the Kubernetes API listening port will be mapped to on your host system
image: rancher/k3s:v1.20.4-k3s1 # same as `--image rancher/k3s:v1.20.4-k3s1`
network: my-custom-net # same as `--network my-custom-net`
subnet: "172.28.0.0/16" # same as `--subnet 172.28.0.0/16`
token: superSecretToken # same as `--token superSecretToken`
volumes: # repeatable flags are represented as YAML lists
  - volume: /my/host/path:/path/in/node # same as `--volume '/my/host/path:/path/in/node@server:0;agent:*'`
@ -77,7 +77,10 @@ env:
    nodeFilters:
      - server:0
registries: # define how registries should be created or used
  create: true # creates a default registry to be used with the cluster; same as `--registry-create`
  create: # creates a default registry to be used with the cluster; same as `--registry-create registry.localhost`
    name: registry.localhost
    host: "0.0.0.0"
    hostPort: "5000"
  use:
    - k3d-myotherregistry:5000 # some other k3d-managed registry; same as `--registry-use 'k3d-myotherregistry:5000'`
  config: | # define contents of the `registries.yaml` file (or reference a file); same as `--registry-config /path/to/config.yaml`
@ -92,7 +95,9 @@ options:
  disableLoadbalancer: false # same as `--no-lb`
  disableImageVolume: false # same as `--no-image-volume`
  disableRollback: false # same as `--no-rollback`
  disableHostIPInjection: false # same as `--no-hostip`
  loadbalancer:
    configOverrides:
      - settings.workerConnections=2048
  k3s: # options passed on to K3s itself
    extraArgs: # additional arguments passed to the `k3s server|agent` command; same as `--k3s-arg`
      - arg: --tls-san=my.host.domain
@ -129,4 +134,4 @@ For example, you use the same config file to create three clusters which only ha

## References

- k3d demo repository: <https://github.com/iwilltry42/k3d-demo/blob/main/README.md#config-file-support>
- SUSE Blog: <https://www.suse.com/c/introduction-k3d-run-k3s-docker-src/> (Search fo `The “Configuration as Code” Way`)
- SUSE Blog: <https://www.suse.com/c/introduction-k3d-run-k3s-docker-src/> (Search for `The “Configuration as Code” Way`)

@ -1,6 +0,0 @@
title: Guides
nav:
  - exposing_services.md
  - registries.md
  - calico.md
  - cuda.md
@ -1,12 +1,12 @@
# Creating multi-server clusters

!!! info "Important note"
    For the best results (and less unexpected issues), choose 1, 3, 5, ... server nodes.
    For the best results (and less unexpected issues), choose 1, 3, 5, ... server nodes. (Read more on etcd quorum on [etcd.io](https://etcd.io/docs/v3.3/faq/#why-an-odd-number-of-cluster-members))
    At least 2 cores and 4GiB of RAM are recommended.

## Embedded etcd (old: dqlite)
## Embedded etcd

Create a cluster with 3 server nodes using k3s' embedded etcd (old: dqlite) database.
Create a cluster with 3 server nodes using k3s' embedded etcd database.
The first server to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other server nodes.

```bash
@ -23,4 +23,4 @@ k3d node create newserver --cluster multiserver --role server

!!! important "There's a trap!"
    If your cluster was initially created with only a single server node, then this will fail.
    That's because the initial server node was not started with the `--cluster-init` flag and thus is not using the etcd (old: dqlite) backend.
    That's because the initial server node was not started with the `--cluster-init` flag and thus is not using the etcd backend.
@ -1,13 +1,10 @@
# Registries
# Using Image Registries

## Registries configuration file

You can add registries by specifying them in a `registries.yaml` and referencing it at creation time:
`#!bash k3d cluster create mycluster --registry-config "/home/YOU/my-registries.yaml"`.

??? Tip "Pre v4.0.0 solution"
    Before we added the `--registry-config` flag in k3d v4.0.0, you had to bind-mount the file to the correct location: `--volume "/home/YOU/my-registries.yaml:/etc/rancher/k3s/registries.yaml"`

This file is a regular [k3s registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/), and looks like this:

```yaml
@ -17,10 +14,7 @@ mirrors:
      - http://my.company.registry:5000
```

In this example, an image with a name like `my.company.registry:5000/nginx:latest` would be
_pulled_ from the registry running at `http://my.company.registry:5000`.

Note well there is an important limitation: **this configuration file will only work with k3s >= v0.10.0**. It will fail silently with previous versions of k3s, but you find in the [section below](#k3s-old) an alternative solution.
In this example, an image with a name like `my.company.registry:5000/nginx:latest` would be _pulled_ from the registry running at `http://my.company.registry:5000`.

This file can also be used for providing additional information necessary for accessing some registries, like [authentication](#authenticated-registries) and [certificates](#secure-registries).

@ -35,7 +29,8 @@ name: test
servers: 1
agents: 2
registries:
  create: true
  create:
    name: myregistry
  config: |
    mirrors:
      "my.company.registry":
@ -43,7 +38,7 @@ registries:
        - http://my.company.registry:5000
```

Here, the config for the k3d-managed registry, created by the `create: true` flag, will be merged with the config specified under `config: |`.
Here, the config for the k3d-managed registry, created by the `create: {...}` option, will be merged with the config specified under `config: |`.

### Authenticated registries

@ -95,24 +90,21 @@ k3d cluster create \

### Using k3d-managed registries

!!! info "Just ported!"
    The k3d-managed registry is available again as of k3d v4.0.0 (January 2021)

#### Create a dedicated registry together with your cluster

1. `#!bash k3d cluster create mycluster --registry-create`: This creates your cluster `mycluster` together with a registry container called `k3d-mycluster-registry`
1. `#!bash k3d cluster create mycluster --registry-create mycluster-registry`: This creates your cluster `mycluster` together with a registry container called `mycluster-registry`

   - k3d sets everything up in the cluster for containerd to be able to pull images from that registry (using the `registries.yaml` file)
   - the port that the registry is listening on will be mapped to a random port on your host system

2. Check the k3d command output or `#!bash docker ps -f name=k3d-mycluster-registry` to find the exposed port (let's use `12345` here)
3. Pull some image (optional) `#!bash docker pull alpine:latest`, re-tag it to reference your newly created registry `#!bash docker tag alpine:latest k3d-mycluster-registry:12345/testimage:local` and push it `#!bash docker push k3d-mycluster-registry:12345/testimage:local`
4. Use kubectl to create a new pod in your cluster using that image to see if the cluster can pull from the new registry: `#!bash kubectl run --image k3d-mycluster-registry:12345/testimage:local testimage --command -- tail -f /dev/null` (creates a container that will not do anything but keep on running)
2. Check the k3d command output or `#!bash docker ps -f name=mycluster-registry` to find the exposed port (let's use `12345` here)
3. Pull some image (optional) `#!bash docker pull alpine:latest`, re-tag it to reference your newly created registry `#!bash docker tag alpine:latest mycluster-registry:12345/testimage:local` and push it `#!bash docker push mycluster-registry:12345/testimage:local`
4. Use kubectl to create a new pod in your cluster using that image to see if the cluster can pull from the new registry: `#!bash kubectl run --image mycluster-registry:12345/testimage:local testimage --command -- tail -f /dev/null` (creates a container that will not do anything but keep on running)

#### Create a customized k3d-managed registry

1. `#!bash k3d registry create myregistry.localhost --port 12345` creates a new registry called `k3d-myregistry.localhost` (could be used with automatic resolution of `*.localhost`, see next section - also, **note the `k3d-` prefix** that k3d adds to all resources it creates)
2. `#!bash k3d cluster create newcluster --registry-use k3d-myregistry.localhost:12345` (make sure you use the **`k3d-` prefix** here) creates a new cluster set up to us that registry
2. `#!bash k3d cluster create newcluster --registry-use k3d-myregistry.localhost:12345` (make sure you use the **`k3d-` prefix** here) creates a new cluster set up to use that registry
3. continue with steps 3 and 4 from the last section for testing

<!-- Admonition to describe usage of a non-k3d-managed registry -->
@ -129,13 +121,13 @@ k3d cluster create \
docker container run -d --name registry.localhost -v local_registry:/var/lib/registry --restart always -p 5000:5000 registry:2
```

These commands will start your registry in `registry.localhost:5000`. In order to push to this registry, you will need to make it accessible as described in the next section.
These commands will start your registry container with name and port `registry.localhost:5000`. In order to push to this registry, you will need to make it accessible as described in the next section.
Once your registry is up and running, we will need to add it to your `registries.yaml` configuration file.
Finally, you have to connect the registry network to the k3d cluster network: `#!bash docker network connect k3d-k3s-default registry.localhost`. And then you can [test your local registry](#testing-your-registry).

### Pushing to your local registry address

As per the guide above, the registry will be available at `registry.localhost:5000`.
As per the guide above, the registry will be available as `registry.localhost:5000`.

All the nodes in your k3d cluster can resolve this hostname (thanks to the DNS server provided by the Docker daemon) but, in order to be able to push to this registry, this hostname also has to be resolved by your host.

@ -150,7 +142,9 @@ If your system does not provide/support tools that can auto-resolve specific nam
127.0.0.1 k3d-registry.localhost
```

Once again, this will only work with k3s >= v0.10.0 (see the sections below when using k3s <= v0.9.1)
!!! info "Just use localhost"
    Alternatively, if you don't care about pretty names, just push directly to `localhost:5000` (or whatever port you used) and it will work.
    If you later pull the image from the registry, only the repository path (e.g. `myrepo/myimage:mytag` in `registry.localhost:5000/myrepo/myimage:mytag`) matters to find your image in the targeted registry.

## Testing your registry

@ -199,44 +193,3 @@ EOF
```

Then you should check that the pod is running with `kubectl get pods -l "app=nginx-test-registry"`.

## Configuring registries for k3s <= v0.9.1

k3s servers below v0.9.1 do not recognize the `registries.yaml` file as described in the beginning, so you will need to embed the contents of that file in a `containerd` configuration file.
You will have to create your own `containerd` configuration file at some well-known path like `${HOME}/.k3d/config.toml.tmpl`, like this:

??? registriesprev091 "config.toml.tmpl"

    ```toml
    # Original section: no changes
    [plugins.opt]
      path = "{{ .NodeConfig.Containerd.Opt }}"
    [plugins.cri]
      stream_server_address = "{{ .NodeConfig.AgentConfig.NodeName }}"
      stream_server_port = "10010"
    {{- if .IsRunningInUserNS }}
      disable_cgroup = true
      disable_apparmor = true
      restrict_oom_score_adj = true
    {{ end -}}
    {{- if .NodeConfig.AgentConfig.PauseImage }}
      sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"
    {{ end -}}
    {{- if not .NodeConfig.NoFlannel }}
    [plugins.cri.cni]
      bin_dir = "{{ .NodeConfig.AgentConfig.CNIBinDir }}"
      conf_dir = "{{ .NodeConfig.AgentConfig.CNIConfDir }}"
    {{ end -}}

    # Added section: additional registries and the endpoints
    [plugins.cri.registry.mirrors]
    [plugins.cri.registry.mirrors."registry.localhost:5000"]
      endpoint = ["http://registry.localhost:5000"]
    ```

and then mount it at `/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl` (where `containerd` in your k3d nodes will load it) when creating the k3d cluster:

```bash
k3d cluster create mycluster \
  --volume ${HOME}/.k3d/config.toml.tmpl:/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
```
85
go.mod
@ -1,16 +1,15 @@
module github.com/rancher/k3d/v4
module github.com/rancher/k3d/v5

go 1.16
go 1.17

require (
    github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect
    github.com/Microsoft/hcsshim v0.8.14 // indirect
    github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect
    github.com/containerd/containerd v1.4.4 // indirect
    github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e // indirect
    github.com/docker/cli v20.10.7+incompatible
    github.com/docker/docker v20.10.7+incompatible
    github.com/docker/docker-credential-helpers v0.6.3 // indirect
    github.com/Microsoft/go-winio v0.4.17 // indirect
    github.com/Microsoft/hcsshim v0.8.18 // indirect
    github.com/containerd/cgroups v1.0.1 // indirect
    github.com/containerd/containerd v1.5.5
    github.com/docker/cli v20.10.8+incompatible
    github.com/docker/docker v20.10.8+incompatible
    github.com/docker/docker-credential-helpers v0.6.4 // indirect
    github.com/docker/go-connections v0.4.0
    github.com/docker/go-metrics v0.0.1 // indirect
    github.com/docker/go-units v0.4.0
@ -37,8 +36,68 @@ require (
    golang.org/x/text v0.3.6 // indirect
    gopkg.in/yaml.v2 v2.4.0
    gotest.tools v2.2.0+incompatible
    gotest.tools/v3 v3.0.3 // indirect
    inet.af/netaddr v0.0.0-20210421205553-78c777480f22
    k8s.io/client-go v0.21.0
    inet.af/netaddr v0.0.0-20210903134321-85fa6c94624e
    k8s.io/client-go v0.22.1
    sigs.k8s.io/yaml v1.2.0
)

require github.com/spf13/pflag v1.0.5

require (
    github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.1.1 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/docker/distribution v2.7.1+incompatible // indirect
    github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
    github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
    github.com/fsnotify/fsnotify v1.4.9 // indirect
    github.com/go-logr/logr v0.4.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/go-cmp v0.5.5 // indirect
    github.com/google/gofuzz v1.1.0 // indirect
    github.com/gorilla/mux v1.7.3 // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/inconshreveable/mousetrap v1.0.0 // indirect
    github.com/json-iterator/go v1.1.11 // indirect
    github.com/magiconair/properties v1.8.5 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
    github.com/miekg/pkcs11 v1.0.3 // indirect
    github.com/mitchellh/mapstructure v1.4.1 // indirect
    github.com/mitchellh/reflectwalk v1.0.2 // indirect
    github.com/moby/sys/mountinfo v0.4.1 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.1 // indirect
    github.com/opencontainers/go-digest v1.0.0 // indirect
    github.com/opencontainers/image-spec v1.0.1 // indirect
    github.com/pelletier/go-toml v1.9.3 // indirect
    github.com/prometheus/client_golang v1.7.1 // indirect
    github.com/prometheus/client_model v0.2.0 // indirect
    github.com/prometheus/common v0.10.0 // indirect
    github.com/prometheus/procfs v0.6.0 // indirect
    github.com/spf13/afero v1.6.0 // indirect
    github.com/spf13/cast v1.3.1 // indirect
    github.com/spf13/jwalterweatherman v1.1.0 // indirect
    github.com/subosito/gotenv v1.2.0 // indirect
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
    go.opencensus.io v0.23.0 // indirect
    go4.org/intern v0.0.0-20210108033219-3eb7198706b2 // indirect
    go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 // indirect
    golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 // indirect
    golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 // indirect
    golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 // indirect
    golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
    golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
    google.golang.org/grpc v1.38.0 // indirect
    google.golang.org/protobuf v1.26.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/ini.v1 v1.62.0 // indirect
    k8s.io/apimachinery v0.22.1 // indirect
    k8s.io/klog/v2 v2.9.0 // indirect
    k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
)

2  main.go
@ -21,7 +21,7 @@ THE SOFTWARE.
*/
package main

import "github.com/rancher/k3d/v4/cmd"
import "github.com/rancher/k3d/v5/cmd"

func main() {
  cmd.Execute()
27  manifest.tmpl  (Normal file)
@ -0,0 +1,27 @@
image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
  - {{this}}
{{/each}}
{{/if}}
manifests:
  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
    platform:
      architecture: amd64
      os: linux
  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64
    platform:
      variant: v8
      architecture: arm64
      os: linux
  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm
    platform:
      variant: v7
      architecture: arm
      os: linux
  - image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm
    platform:
      variant: v6
      architecture: arm
      os: linux
18  mkdocs.yml
@ -25,7 +25,11 @@ theme:
  name: material
  language: en
  features:
    - tabs
    - navigation.top # show back to top button
    - search.suggest # search suggestions: https://squidfunk.github.io/mkdocs-material/setup/setting-up-site-search/#search-suggestions
    - search.highlight # highlight search term on target page: https://squidfunk.github.io/mkdocs-material/setup/setting-up-site-search/#search-suggestions
    - navigation.expand
    - navigation.tabs
  palette:
    - media: "(prefers-color-scheme: light)"
      scheme: default
@ -71,6 +75,16 @@ plugins:
      type: date
  - awesome-pages # https://squidfunk.github.io/mkdocs-material/plugins/awesome-pages/
  - include-markdown # https://github.com/mondeja/mkdocs-include-markdown-plugin
  - mike: # Versioned Docs: https://github.com/jimporter/mike
      version_selector: true # set to false to leave out the version selector
      css_dir: static/css # the directory to put the version selector's CSS
      javascript_dir: static/js # the directory to put the version selector's JS
      canonical_version: null # the version for <link rel="canonical">; `null` uses the version specified via `mike deploy`

# Extra mkdocs-material settings
extra:
  version:
    provider: mike

# Other Settings
strict: true # halt processing when a warning is raised
@ -22,11 +22,16 @@ THE SOFTWARE.
package actions

import (
  "bytes"
  "context"
  "fmt"
  "io"
  "os"

  "github.com/rancher/k3d/v4/pkg/runtimes"
  k3d "github.com/rancher/k3d/v4/pkg/types"
  "github.com/rancher/k3d/v5/pkg/runtimes"
  k3d "github.com/rancher/k3d/v5/pkg/types"

  l "github.com/rancher/k3d/v5/pkg/logger"
)

type WriteFileAction struct {
@ -39,3 +44,35 @@ type WriteFileAction struct {
func (act WriteFileAction) Run(ctx context.Context, node *k3d.Node) error {
  return act.Runtime.WriteToNode(ctx, act.Content, act.Dest, act.Mode, node)
}

type RewriteFileAction struct {
  Runtime     runtimes.Runtime
  Path        string
  RewriteFunc func([]byte) ([]byte, error)
  Mode        os.FileMode
}

func (act RewriteFileAction) Run(ctx context.Context, node *k3d.Node) error {
  reader, err := act.Runtime.ReadFromNode(ctx, act.Path, node)
  if err != nil {
    return fmt.Errorf("runtime failed to read '%s' from node '%s': %w", act.Path, node.Name, err)
  }
  defer reader.Close()

  file, err := io.ReadAll(reader)
  if err != nil {
    return fmt.Errorf("failed to read file: %w", err)
  }

  file = bytes.Trim(file[512:], "\x00") // trim control characters, etc.

  file, err = act.RewriteFunc(file)
  if err != nil {
    return fmt.Errorf("error while rewriting %s in %s: %w", act.Path, node.Name, err)
  }

  l.Log().Tracef("Rewritten:\n%s", string(file))

  return act.Runtime.WriteToNode(ctx, file, act.Path, act.Mode, node)

}
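For context, a minimal sketch of how this new `RewriteFileAction` could be wired into a node lifecycle hook, mirroring how `WriteFileAction` is attached elsewhere in this changeset. The target path and the `OLD`/`NEW` replacement are invented for illustration; the field and type names are taken from the diff above:

```go
package main

import (
	"bytes"

	"github.com/rancher/k3d/v5/pkg/actions"
	"github.com/rancher/k3d/v5/pkg/runtimes"
	k3d "github.com/rancher/k3d/v5/pkg/types"
)

// buildRewriteHook returns a hypothetical pre-start hook that rewrites a
// file inside the node before it starts.
func buildRewriteHook(rt runtimes.Runtime) k3d.NodeHook {
	return k3d.NodeHook{
		Stage: k3d.LifecycleStagePreStart,
		Action: actions.RewriteFileAction{
			Runtime: rt,
			Path:    "/etc/example/config.yaml", // hypothetical file inside the node
			Mode:    0644,
			RewriteFunc: func(input []byte) ([]byte, error) {
				// in-place transformation of the content read from the node
				return bytes.ReplaceAll(input, []byte("OLD"), []byte("NEW")), nil
			},
		},
	}
}
```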
@ -22,13 +22,16 @@
package client

import (
  "bytes"
  "context"
  _ "embed"
  "errors"
  "fmt"
  "io/ioutil"
  "io"
  "os"
  "sort"
  "strconv"
  "strings"
  "time"

  gort "runtime"
@ -36,16 +39,17 @@ import (
  "github.com/docker/go-connections/nat"
  "github.com/imdario/mergo"
  copystruct "github.com/mitchellh/copystructure"
  "github.com/rancher/k3d/v4/pkg/actions"
  config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
  k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
  "github.com/rancher/k3d/v4/pkg/runtimes/docker"
  runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
  "github.com/rancher/k3d/v4/pkg/types"
  k3d "github.com/rancher/k3d/v4/pkg/types"
  "github.com/rancher/k3d/v4/pkg/types/k3s"
  "github.com/rancher/k3d/v4/pkg/util"
  log "github.com/sirupsen/logrus"
  "github.com/rancher/k3d/v5/pkg/actions"
  config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
  l "github.com/rancher/k3d/v5/pkg/logger"
  k3drt "github.com/rancher/k3d/v5/pkg/runtimes"
  "github.com/rancher/k3d/v5/pkg/runtimes/docker"
  runtimeErr "github.com/rancher/k3d/v5/pkg/runtimes/errors"
  "github.com/rancher/k3d/v5/pkg/types"
  k3d "github.com/rancher/k3d/v5/pkg/types"
  "github.com/rancher/k3d/v5/pkg/types/k3s"
  "github.com/rancher/k3d/v5/pkg/util"
  "github.com/sirupsen/logrus"
  "golang.org/x/sync/errgroup"
  "gopkg.in/yaml.v2"
)
@ -59,6 +63,9 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
    return fmt.Errorf("Failed Cluster Preparation: %+v", err)
  }

  // Create tools-node for later steps
  go EnsureToolsNode(ctx, runtime, &clusterConfig.Cluster)

  /*
   * Step 1: Create Containers
   */
@ -69,15 +76,20 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
  /*
   * Step 2: Pre-Start Configuration
   */
  // TODO: ClusterRun: add cluster configuration step here
  envInfo, err := GatherEnvironmentInfo(ctx, runtime, &clusterConfig.Cluster)
  if err != nil {
    return fmt.Errorf("failed to gather environment information used for cluster creation: %w", err)
  }

  /*
   * Step 3: Start Containers
   */
  if err := ClusterStart(ctx, runtime, &clusterConfig.Cluster, k3d.ClusterStartOpts{
    WaitForServer: clusterConfig.ClusterCreateOpts.WaitForServer,
    Timeout:       clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
    NodeHooks:     clusterConfig.ClusterCreateOpts.NodeHooks,
    WaitForServer:   clusterConfig.ClusterCreateOpts.WaitForServer,
    Timeout:         clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
    NodeHooks:       clusterConfig.ClusterCreateOpts.NodeHooks,
    EnvironmentInfo: envInfo,
    Intent:          k3d.IntentClusterCreate,
  }); err != nil {
    return fmt.Errorf("Failed Cluster Start: %+v", err)
  }
@ -89,19 +101,10 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
   * Additional Cluster Preparation *
   **********************************/

  /*
   * Networking Magic
   */

  // add /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
  if !clusterConfig.ClusterCreateOpts.PrepDisableHostIPInjection {
    prepInjectHostIP(ctx, runtime, &clusterConfig.Cluster)
  }

  // create the registry hosting configmap
  if len(clusterConfig.ClusterCreateOpts.Registries.Use) > 0 {
    if err := prepCreateLocalRegistryHostingConfigMap(ctx, runtime, &clusterConfig.Cluster); err != nil {
      log.Warnf("Failed to create LocalRegistryHosting ConfigMap: %+v", err)
      l.Log().Warnf("Failed to create LocalRegistryHosting ConfigMap: %+v", err)
    }
  }

@ -148,14 +151,14 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf

  // Ensure referenced registries
  for _, reg := range clusterConfig.ClusterCreateOpts.Registries.Use {
    log.Debugf("Trying to find registry %s", reg.Host)
    l.Log().Debugf("Trying to find registry %s", reg.Host)
    regNode, err := runtime.GetNode(ctx, &k3d.Node{Name: reg.Host})
    if err != nil {
      return fmt.Errorf("Failed to find registry node '%s': %+v", reg.Host, err)
    }
    regFromNode, err := RegistryFromNode(regNode)
    if err != nil {
      return err
      return fmt.Errorf("failed to translate node to registry spec: %w", err)
    }
    *reg = *regFromNode
  }
@ -173,7 +176,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
  }

  // Use existing registries (including the new one, if created)
  log.Tracef("Using Registries: %+v", clusterConfig.ClusterCreateOpts.Registries.Use)
  l.Log().Tracef("Using Registries: %+v", clusterConfig.ClusterCreateOpts.Registries.Use)

  var registryConfig *k3s.Registry

@ -200,7 +203,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
    if err != nil {
      return fmt.Errorf("Failed to generate LocalRegistryHosting configmap: %+v", err)
    }
    log.Tracef("Writing LocalRegistryHosting YAML:\n%s", string(regCm))
    l.Log().Tracef("Writing LocalRegistryHosting YAML:\n%s", string(regCm))
    clusterConfig.ClusterCreateOpts.NodeHooks = append(clusterConfig.ClusterCreateOpts.NodeHooks, k3d.NodeHook{
      Stage: k3d.LifecycleStagePreStart,
      Action: actions.WriteFileAction{
@ -220,7 +223,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf
    if err := RegistryMergeConfig(ctx, registryConfig, clusterConfig.ClusterCreateOpts.Registries.Config); err != nil {
      return err
    }
    log.Tracef("Merged registry config: %+v", registryConfig)
    l.Log().Tracef("Merged registry config: %+v", registryConfig)
  } else {
    registryConfig = clusterConfig.ClusterCreateOpts.Registries.Config
  }
@ -247,7 +250,7 @@ func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf

// ClusterPrepNetwork creates a new cluster network, if needed or sets everything up to re-use an existing network
func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
  log.Infoln("Prep: Network")
  l.Log().Infoln("Prep: Network")

  // error out if external cluster network should be used but no name was set
  if cluster.Network.Name == "" && cluster.Network.External {
@ -273,8 +276,7 @@ func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d
  // create cluster network or use an existing one
  network, networkExists, err := runtime.CreateNetworkIfNotPresent(ctx, &cluster.Network)
  if err != nil {
    log.Errorln("Failed to create cluster network")
    return err
    return fmt.Errorf("failed to create cluster network: %w", err)
  }
  cluster.Network = *network
  clusterCreateOpts.GlobalLabels[k3d.LabelNetworkID] = network.ID
@ -282,7 +284,7 @@ func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d
  clusterCreateOpts.GlobalLabels[k3d.LabelNetworkIPRange] = cluster.Network.IPAM.IPPrefix.String()
  clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = strconv.FormatBool(cluster.Network.External)
  if networkExists {
    log.Infof("Re-using existing network '%s' (%s)", network.Name, network.ID)
    l.Log().Infof("Re-using existing network '%s' (%s)", network.Name, network.ID)
    clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
  }

@ -296,11 +298,11 @@ func ClusterPrepImageVolume(ctx context.Context, runtime k3drt.Runtime, cluster
   */
  imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
  if err := runtime.CreateVolume(ctx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
    log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
    return err
    return fmt.Errorf("failed to create image volume '%s' for cluster '%s': %w", imageVolumeName, cluster.Name, err)
  }

  clusterCreateOpts.GlobalLabels[k3d.LabelImageVolume] = imageVolumeName
  cluster.ImageVolume = imageVolumeName

  // attach volume to nodes
  for _, node := range cluster.Nodes {
@ -314,7 +316,7 @@ func ClusterPrepImageVolume(ctx context.Context, runtime k3drt.Runtime, cluster
// - a docker network
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {

  log.Tracef(`
  l.Log().Tracef(`
===== Creating Cluster =====

Runtime:
@ -345,16 +347,16 @@ ClusterCreatOpts:
   */
  if cluster.KubeAPI.Host == k3d.DefaultAPIHost && runtime == k3drt.Docker {
    if gort.GOOS == "windows" || gort.GOOS == "darwin" {
      log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
      l.Log().Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
      machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
      if err != nil {
        log.Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
        l.Log().Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
      } else if machineIP != "" {
        log.Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
        l.Log().Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
        cluster.KubeAPI.Host = machineIP
        cluster.KubeAPI.Binding.HostIP = machineIP
      } else {
        log.Traceln("Not using docker-machine")
        l.Log().Traceln("Not using docker-machine")
      }
    }
  }
@ -378,7 +380,7 @@ ClusterCreatOpts:
  // connection url is always the name of the first server node (index 0) // TODO: change this to the server loadbalancer
  connectionURL := fmt.Sprintf("https://%s:%s", GenerateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
  clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL
  clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token))
  clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("%s=%s", k3s.EnvClusterToken, cluster.Token))

  nodeSetup := func(node *k3d.Node) error {
    // cluster specific settings
@ -400,7 +402,7 @@ ClusterCreatOpts:
    if cluster.Network.IPAM.Managed {
      ip, err := GetIP(ctx, runtime, &cluster.Network)
      if err != nil {
        return err
        return fmt.Errorf("failed to find free IP in network %s: %w", cluster.Network.Name, err)
      }
      cluster.Network.IPAM.IPsUsed = append(cluster.Network.IPAM.IPsUsed, ip) // make sure that we're not reusing the same IP next time
      node.IP.Static = true
@ -412,12 +414,12 @@ ClusterCreatOpts:

      // the cluster has an init server node, but its not this one, so connect it to the init node
      if cluster.InitNode != nil && !node.ServerOpts.IsInit {
        node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
        node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, connectionURL))
        node.RuntimeLabels[k3d.LabelServerIsInit] = "false" // set label, that this server node is not the init server
      }

    } else if node.Role == k3d.AgentRole {
      node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
      node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, connectionURL))
    }

    node.Networks = []string{cluster.Network.Name}
@ -425,12 +427,11 @@ ClusterCreatOpts:
    node.GPURequest = clusterCreateOpts.GPURequest

    // create node
    log.Infof("Creating node '%s'", node.Name)
    l.Log().Infof("Creating node '%s'", node.Name)
    if err := NodeCreate(clusterCreateCtx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
      log.Errorln("Failed to create node")
      return err
      return fmt.Errorf("failed to create node: %w", err)
    }
    log.Debugf("Created node '%s'", node.Name)
    l.Log().Debugf("Created node '%s'", node.Name)

    // start node
    //return NodeStart(clusterCreateCtx, runtime, node, k3d.NodeStartOpts{PreStartActions: clusterCreateOpts.NodeHookActions})
@ -442,7 +443,7 @@ ClusterCreatOpts:

  // create init node first
  if cluster.InitNode != nil {
    log.Infoln("Creating initializing server node")
    l.Log().Infoln("Creating initializing server node")
    cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init")
    if cluster.InitNode.RuntimeLabels == nil {
      cluster.InitNode.RuntimeLabels = map[string]string{}
@ -458,7 +459,7 @@ ClusterCreatOpts:
    }

    if err := nodeSetup(cluster.InitNode); err != nil {
      return err
      return fmt.Errorf("failed init node setup: %w", err)
    }
    serverCount++

@ -486,14 +487,14 @@ ClusterCreatOpts:
    }
    if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
      if err := nodeSetup(node); err != nil {
        return err
        return fmt.Errorf("failed setup of server/agent node %s: %w", node.Name, err)
      }
    }
  }

  // WARN, if there are exactly two server nodes: that means we're using etcd, but don't have fault tolerance
  if serverCount == 2 {
    log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
    l.Log().Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
  }

  /*
@ -502,9 +503,10 @@ ClusterCreatOpts:
  // *** ServerLoadBalancer ***
  if !clusterCreateOpts.DisableLoadBalancer {
    if cluster.ServerLoadBalancer == nil {
      l.Log().Infof("No loadbalancer specified, creating a default one...")
      lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels})
      if err != nil {
        return err
        return fmt.Errorf("failed to prepare loadbalancer: %w", err)
      }
      cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
    }
@ -522,7 +524,7 @@ ClusterCreatOpts:
    // prepare to write config to lb container
    configyaml, err := yaml.Marshal(cluster.ServerLoadBalancer.Config)
    if err != nil {
      return err
      return fmt.Errorf("failed to marshal loadbalancer config: %w", err)
    }

    writeLbConfigAction := k3d.NodeHook{
@ -537,12 +539,11 @@ ClusterCreatOpts:

    cluster.ServerLoadBalancer.Node.HookActions = append(cluster.ServerLoadBalancer.Node.HookActions, writeLbConfigAction)

    log.Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
    l.Log().Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
    if err := NodeCreate(ctx, runtime, cluster.ServerLoadBalancer.Node, k3d.NodeCreateOpts{}); err != nil {
      return fmt.Errorf("error creating loadbalancer: %v", err)
    }
    log.Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
    return err
    l.Log().Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
  }

  return nil
@ -551,18 +552,18 @@ ClusterCreatOpts:
// ClusterDelete deletes an existing cluster
func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, opts k3d.ClusterDeleteOpts) error {

  log.Infof("Deleting cluster '%s'", cluster.Name)
  l.Log().Infof("Deleting cluster '%s'", cluster.Name)
  cluster, err := ClusterGet(ctx, runtime, cluster)
  if err != nil {
    return err
    return fmt.Errorf("failed to get cluster: %w", err)
  }
  log.Debugf("Cluster Details: %+v", cluster)
  l.Log().Debugf("Cluster Details: %+v", cluster)

  failed := 0
  for _, node := range cluster.Nodes {
    // registry: only delete, if not connected to other networks
    if node.Role == k3d.RegistryRole && !opts.SkipRegistryCheck {
      log.Tracef("Registry Node has %d networks: %+v", len(node.Networks), node)
      l.Log().Tracef("Registry Node has %d networks: %+v", len(node.Networks), node)

      // check if node is connected to other networks, that are not
      // - the cluster network
@ -576,21 +577,21 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
        if net == "bridge" || net == "host" {
          continue
        }
        log.Tracef("net: %s", net)
        l.Log().Tracef("net: %s", net)
        connectedToOtherNet = true
        break
      }
      if connectedToOtherNet {
        log.Infof("Registry %s is also connected to other (non-default) networks (%+v), not deleting it...", node.Name, node.Networks)
        l.Log().Infof("Registry %s is also connected to other (non-default) networks (%+v), not deleting it...", node.Name, node.Networks)
        if err := runtime.DisconnectNodeFromNetwork(ctx, node, cluster.Network.Name); err != nil {
          log.Warnf("Failed to disconnect registry %s from cluster network %s", node.Name, cluster.Network.Name)
          l.Log().Warnf("Failed to disconnect registry %s from cluster network %s", node.Name, cluster.Network.Name)
        }
        continue
      }
    }

    if err := NodeDelete(ctx, runtime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
      log.Warningf("Failed to delete node '%s': Try to delete it manually", node.Name)
      l.Log().Warningf("Failed to delete node '%s': Try to delete it manually", node.Name)
      failed++
      continue
    }
@ -599,48 +600,48 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
  // Delete the cluster network, if it was created for/by this cluster (and if it's not in use anymore)
  if cluster.Network.Name != "" {
    if !cluster.Network.External {
      log.Infof("Deleting cluster network '%s'", cluster.Network.Name)
      l.Log().Infof("Deleting cluster network '%s'", cluster.Network.Name)
      if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
        if errors.Is(err, runtimeErr.ErrRuntimeNetworkNotEmpty) { // there are still containers connected to that network

          connectedNodes, err := runtime.GetNodesInNetwork(ctx, cluster.Network.Name) // check, if there are any k3d nodes connected to the cluster
          if err != nil {
            log.Warningf("Failed to check cluster network for connected nodes: %+v", err)
            l.Log().Warningf("Failed to check cluster network for connected nodes: %+v", err)
          }

          if len(connectedNodes) > 0 { // there are still k3d-managed containers (aka nodes) connected to the network
            connectedRegistryNodes := util.FilterNodesByRole(connectedNodes, k3d.RegistryRole)
            if len(connectedRegistryNodes) == len(connectedNodes) { // only registry node(s) left in the network
              for _, node := range connectedRegistryNodes {
                log.Debugf("Disconnecting registry node %s from the network...", node.Name)
                l.Log().Debugf("Disconnecting registry node %s from the network...", node.Name)
                if err := runtime.DisconnectNodeFromNetwork(ctx, node, cluster.Network.Name); err != nil {
                  log.Warnf("Failed to disconnect registry %s from network %s", node.Name, cluster.Network.Name)
                  l.Log().Warnf("Failed to disconnect registry %s from network %s", node.Name, cluster.Network.Name)
                } else {
                  if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
                    log.Warningf("Failed to delete cluster network, even after disconnecting registry node(s): %+v", err)
                    l.Log().Warningf("Failed to delete cluster network, even after disconnecting registry node(s): %+v", err)
                  }
                }
              }
            } else { // besides the registry node(s), there are still other nodes... maybe they still need a registry
              log.Debugf("There are some non-registry nodes left in the network")
              l.Log().Debugf("There are some non-registry nodes left in the network")
            }
          } else {
            log.Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
            l.Log().Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
          }
        } else {
          log.Warningf("Failed to delete cluster network '%s': '%+v'", cluster.Network.Name, err)
          l.Log().Warningf("Failed to delete cluster network '%s': '%+v'", cluster.Network.Name, err)
        }
      }
    } else if cluster.Network.External {
      log.Debugf("Skip deletion of cluster network '%s' because it's managed externally", cluster.Network.Name)
      l.Log().Debugf("Skip deletion of cluster network '%s' because it's managed externally", cluster.Network.Name)
    }
  }

  // delete image volume
  if cluster.ImageVolume != "" {
    log.Infof("Deleting image volume '%s'", cluster.ImageVolume)
    l.Log().Infof("Deleting image volume '%s'", cluster.ImageVolume)
    if err := runtime.DeleteVolume(ctx, cluster.ImageVolume); err != nil {
      log.Warningf("Failed to delete image volume '%s' of cluster '%s': Try to delete it manually", cluster.ImageVolume, cluster.Name)
      l.Log().Warningf("Failed to delete image volume '%s' of cluster '%s': Try to delete it manually", cluster.ImageVolume, cluster.Name)
    }
  }

@ -653,26 +654,25 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus

// ClusterList returns a list of all existing clusters
func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
  log.Traceln("Listing Clusters...")
  l.Log().Traceln("Listing Clusters...")
  nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels)
  if err != nil {
    log.Errorln("Failed to get clusters")
    return nil, err
    return nil, fmt.Errorf("runtime failed to list nodes: %w", err)
  }

  log.Debugf("Found %d nodes", len(nodes))
  if log.GetLevel() == log.TraceLevel {
  l.Log().Debugf("Found %d nodes", len(nodes))
  if l.Log().GetLevel() == logrus.TraceLevel {
    for _, node := range nodes {
      log.Tracef("Found node %s of role %s", node.Name, node.Role)
      l.Log().Tracef("Found node %s of role %s", node.Name, node.Role)
    }
  }

  nodes = NodeFilterByRoles(nodes, k3d.ClusterInternalNodeRoles, k3d.ClusterExternalNodeRoles)

  log.Tracef("Found %d cluster-internal nodes", len(nodes))
  if log.GetLevel() == log.TraceLevel {
  l.Log().Tracef("Found %d cluster-internal nodes", len(nodes))
  if l.Log().GetLevel() == logrus.TraceLevel {
    for _, node := range nodes {
      log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName])
      l.Log().Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.RuntimeLabels[k3d.LabelClusterName])
    }
  }

@ -699,11 +699,11 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er
  // enrich cluster structs with label values
  for _, cluster := range clusters {
    if err := populateClusterFieldsFromLabels(cluster); err != nil {
      log.Warnf("Failed to populate cluster fields from node label values for cluster '%s'", cluster.Name)
      log.Warnln(err)
      l.Log().Warnf("Failed to populate cluster fields from node label values for cluster '%s'", cluster.Name)
      l.Log().Warnln(err)
    }
  }
  log.Debugf("Found %d clusters", len(clusters))
  l.Log().Debugf("Found %d clusters", len(clusters))
  return clusters, nil
}

@ -756,7 +756,7 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
  // get nodes that belong to the selected cluster
  nodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name})
  if err != nil {
    log.Errorf("Failed to get nodes for cluster '%s': %v", cluster.Name, err)
    l.Log().Errorf("Failed to get nodes for cluster '%s': %v", cluster.Name, err)
  }

  if len(nodes) == 0 {
@ -797,15 +797,14 @@ func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
    if cluster.ServerLoadBalancer != nil && cluster.ServerLoadBalancer.Node != nil {
      lbcfg, err := GetLoadbalancerConfig(ctx, runtime, cluster)
      if err != nil {
        log.Errorf("error getting loadbalancer config from %s: %v", cluster.ServerLoadBalancer.Node.Name, err)
        l.Log().Errorf("error getting loadbalancer config from %s: %v", cluster.ServerLoadBalancer.Node.Name, err)
      }
      cluster.ServerLoadBalancer.Config = &lbcfg
    }
  }

  if err := populateClusterFieldsFromLabels(cluster); err != nil {
    log.Warnf("Failed to populate cluster fields from node labels")
    log.Warnln(err)
    l.Log().Warnf("Failed to populate cluster fields from node labels: %v", err)
  }

  return cluster, nil
@ -821,12 +820,16 @@ func GenerateNodeName(cluster string, role k3d.Role, suffix int) string {
}

// ClusterStart starts a whole cluster (i.e. all nodes of the cluster)
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.ClusterStartOpts) error {
  log.Infof("Starting cluster '%s'", cluster.Name)
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterStartOpts types.ClusterStartOpts) error {
  l.Log().Infof("Starting cluster '%s'", cluster.Name)

  if startClusterOpts.Timeout > 0*time.Second {
  if clusterStartOpts.Intent == "" {
    clusterStartOpts.Intent = k3d.IntentClusterStart
  }

  if clusterStartOpts.Timeout > 0*time.Second {
    var cancel context.CancelFunc
    ctx, cancel = context.WithTimeout(ctx, startClusterOpts.Timeout)
    ctx, cancel = context.WithTimeout(ctx, clusterStartOpts.Timeout)
    defer cancel()
  }

@ -849,28 +852,21 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
    }
  }

  // TODO: remove trace logs below
  log.Traceln("Servers before sort:")
  for i, n := range servers {
    log.Tracef("Server %d - %s", i, n.Name)
  }
  // sort list of servers for properly ordered sequential start
  sort.Slice(servers, func(i, j int) bool {
    return servers[i].Name < servers[j].Name
  })
  log.Traceln("Servers after sort:")
  for i, n := range servers {
    log.Tracef("Server %d - %s", i, n.Name)
  }

  /*
   * Init Node
   */
  if initNode != nil {
    log.Infoln("Starting the initializing server...")
    if err := NodeStart(ctx, runtime, initNode, k3d.NodeStartOpts{
    l.Log().Infoln("Starting the initializing server...")
    if err := NodeStart(ctx, runtime, initNode, &k3d.NodeStartOpts{
      Wait:            true, // always wait for the init node
      NodeHooks:       startClusterOpts.NodeHooks,
      ReadyLogMessage: "Running kube-apiserver", // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
      NodeHooks:       clusterStartOpts.NodeHooks,
      ReadyLogMessage: types.GetReadyLogMessage(initNode, clusterStartOpts.Intent), // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
      EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
    }); err != nil {
      return fmt.Errorf("Failed to start initializing server node: %+v", err)
    }
@ -879,13 +875,13 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
  /*
   * Server Nodes
   */
  log.Infoln("Starting servers...")
  nodeStartOpts := k3d.NodeStartOpts{
    Wait:      true,
    NodeHooks: startClusterOpts.NodeHooks,
  }
  l.Log().Infoln("Starting servers...")
  for _, serverNode := range servers {
    if err := NodeStart(ctx, runtime, serverNode, nodeStartOpts); err != nil {
    if err := NodeStart(ctx, runtime, serverNode, &k3d.NodeStartOpts{
      Wait:            true,
      NodeHooks:       append(clusterStartOpts.NodeHooks, serverNode.HookActions...),
      EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
    }); err != nil {
      return fmt.Errorf("Failed to start server %s: %+v", serverNode.Name, err)
    }
  }
@ -896,11 +892,15 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust

  agentWG, aCtx := errgroup.WithContext(ctx)

  log.Infoln("Starting agents...")
  l.Log().Infoln("Starting agents...")
  for _, agentNode := range agents {
    currentAgentNode := agentNode
    agentWG.Go(func() error {
      return NodeStart(aCtx, runtime, currentAgentNode, nodeStartOpts)
      return NodeStart(aCtx, runtime, currentAgentNode, &k3d.NodeStartOpts{
        Wait:            true,
        NodeHooks:       clusterStartOpts.NodeHooks,
        EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
      })
    })
  }
  if err := agentWG.Wait(); err != nil {
@ -912,13 +912,14 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
   */

  helperWG, hCtx := errgroup.WithContext(ctx)
  log.Infoln("Starting helpers...")
  l.Log().Infoln("Starting helpers...")
  for _, helperNode := range aux {
    currentHelperNode := helperNode

    helperWG.Go(func() error {
      nodeStartOpts := k3d.NodeStartOpts{
        NodeHooks: currentHelperNode.HookActions,
      nodeStartOpts := &k3d.NodeStartOpts{
        NodeHooks:       currentHelperNode.HookActions,
        EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
      }
      if currentHelperNode.Role == k3d.LoadBalancerRole {
        nodeStartOpts.Wait = true
@ -932,17 +933,96 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
    return fmt.Errorf("Failed to add one or more helper nodes: %w", err)
  }

  /*
   * Additional Cluster Preparation (post start)
   */

  postStartErrgrp, postStartErrgrpCtx := errgroup.WithContext(ctx)

  /*** DNS ***/

  // add host.k3d.internal record to /etc/hosts in all nodes
  postStartErrgrp.Go(func() error {
    return prepInjectHostIP(postStartErrgrpCtx, runtime, cluster, &clusterStartOpts)
  })

  postStartErrgrp.Go(func() error {

    hosts := fmt.Sprintf("%s %s\n", clusterStartOpts.EnvironmentInfo.HostGateway.String(), k3d.DefaultK3dInternalHostRecord)

    net, err := runtime.GetNetwork(ctx, &cluster.Network)
    if err != nil {
      return fmt.Errorf("failed to get cluster network %s to inject host records into CoreDNS: %w", cluster.Network.Name, err)
    }
    for _, member := range net.Members {
      hosts += fmt.Sprintf("%s %s\n", member.IP.String(), member.Name)
    }

    l.Log().Infof("Injecting records for host.k3d.internal and for %d network members into CoreDNS configmap...", len(net.Members))
    act := actions.RewriteFileAction{
      Runtime: runtime,
      Path:    "/var/lib/rancher/k3s/server/manifests/coredns.yaml",
      Mode:    0744,
      RewriteFunc: func(input []byte) ([]byte, error) {
        split, err := util.SplitYAML(input)
        if err != nil {
          return nil, fmt.Errorf("error splitting yaml: %w", err)
        }

        var outputBuf bytes.Buffer
        outputEncoder := yaml.NewEncoder(&outputBuf)

        for _, d := range split {
          var doc map[string]interface{}
          if err := yaml.Unmarshal(d, &doc); err != nil {
            return nil, err
          }
          if kind, ok := doc["kind"]; ok {
            if strings.ToLower(kind.(string)) == "configmap" {
              configmapData := doc["data"].(map[interface{}]interface{})
              configmapData["NodeHosts"] = hosts
            }
          }
          if err := outputEncoder.Encode(doc); err != nil {
            return nil, err
          }
        }
        outputEncoder.Close()
        return outputBuf.Bytes(), nil
      },
    }

    // get the first server in the list and run action on it once it's ready for it
    for _, n := range cluster.Nodes {
      if n.Role == k3d.ServerRole {
        ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", n.State.Started)
        if err != nil {
          return err
        }
        if err := NodeWaitForLogMessage(ctx, runtime, n, "Cluster dns configmap", ts.Truncate(time.Second)); err != nil {
          return err
        }
        return act.Run(ctx, n)
      }
    }
    return nil
  })

  if err := postStartErrgrp.Wait(); err != nil {
    return fmt.Errorf("error during post-start cluster preparation: %w", err)
  }

  return nil
}

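The `RewriteFunc` above does a split/patch/re-encode round-trip over the multi-document CoreDNS manifest. A standalone sketch of that round-trip with `gopkg.in/yaml.v2` (the manifest snippet and host record are placeholders, not taken from k3d):

```go
package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	manifest := []byte("kind: ConfigMap\ndata:\n  NodeHosts: \"\"\n")

	var doc map[string]interface{}
	if err := yaml.Unmarshal(manifest, &doc); err != nil {
		panic(err)
	}
	// yaml.v2 decodes nested mappings as map[interface{}]interface{},
	// which is why the diff above casts doc["data"] that way.
	data := doc["data"].(map[interface{}]interface{})
	data["NodeHosts"] = "42.42.42.42 host.k3d.internal\n" // placeholder record

	var out bytes.Buffer
	enc := yaml.NewEncoder(&out)
	if err := enc.Encode(doc); err != nil {
		panic(err)
	}
	enc.Close()
	fmt.Print(out.String()) // the patched document, ready to be written back
}
```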
// ClusterStop stops a whole cluster (i.e. all nodes of the cluster)
func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
  log.Infof("Stopping cluster '%s'", cluster.Name)
  l.Log().Infof("Stopping cluster '%s'", cluster.Name)

  failed := 0
  for _, node := range cluster.Nodes {
    if err := runtime.StopNode(ctx, node); err != nil {
      log.Warningf("Failed to stop node '%s': Try to stop it manually", node.Name)
      l.Log().Warningf("Failed to stop node '%s': Try to stop it manually", node.Name)
      failed++
      continue
    }
@ -951,6 +1031,8 @@ func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluste
  if failed > 0 {
    return fmt.Errorf("Failed to stop %d nodes: Try to stop them manually", failed)
  }

  l.Log().Infof("Stopped cluster '%s'", cluster.Name)
  return nil
}

@ -962,60 +1044,85 @@ func SortClusters(clusters []*k3d.Cluster) []*k3d.Cluster {
  return clusters
}

// prepInjectHostIP adds /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) {
  log.Infoln("(Optional) Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access")
  hostIP, err := GetHostIP(ctx, runtime, cluster)
  if err != nil {
    log.Warnf("Failed to get HostIP: %+v", err)
  }
  if hostIP != nil {
    hostRecordSuccessMessage := ""
    etcHostsFailureCount := 0
    hostsEntry := fmt.Sprintf("%s %s", hostIP, k3d.DefaultK3dInternalHostRecord)
    log.Debugf("Adding extra host entry '%s'...", hostsEntry)
    for _, node := range cluster.Nodes {
      if err := runtime.ExecInNode(ctx, node, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)}); err != nil {
        log.Warnf("Failed to add extra entry '%s' to /etc/hosts in node '%s'", hostsEntry, node.Name)
        etcHostsFailureCount++
      }
    }
    if etcHostsFailureCount < len(cluster.Nodes) {
      hostRecordSuccessMessage += fmt.Sprintf("Successfully added host record to /etc/hosts in %d/%d nodes", (len(cluster.Nodes) - etcHostsFailureCount), len(cluster.Nodes))
    }

    patchCmd := `patch=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$patch"`
    successInjectCoreDNSEntry := false
    for _, node := range cluster.Nodes {

      if node.Role == k3d.AgentRole || node.Role == k3d.ServerRole {
        logreader, err := runtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", patchCmd})
        if err == nil {
          successInjectCoreDNSEntry = true
          break
        } else {
          msg := fmt.Sprintf("error patching the CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err)
          readlogs, err := ioutil.ReadAll(logreader)
          if err != nil {
            log.Debugf("error reading the logs from failed CoreDNS patch exec process in node %s: %v", node.Name, err)
          } else {
            msg += fmt.Sprintf("\nLogs: %s", string(readlogs))
          }
          log.Debugln(msg)
        }
      }
    }
    if successInjectCoreDNSEntry == false {
      log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s' (see debug logs)", hostsEntry)
// corednsAddHost adds a host entry to the CoreDNS configmap if it doesn't exist (a host entry is a single line of the form "IP HOST")
func corednsAddHost(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, ip string, name string) error {
  retries := 3
  if v, ok := os.LookupEnv(k3d.K3dEnvDebugCorednsRetries); ok && v != "" {
    l.Log().Debugf("Running with %s=%s", k3d.K3dEnvDebugCorednsRetries, v)
    if r, err := strconv.Atoi(v); err == nil {
      retries = r
    } else {
      hostRecordSuccessMessage += " and to the CoreDNS ConfigMap"
      return fmt.Errorf("Invalid value set for env var %s (%s): %w", k3d.K3dEnvDebugCorednsRetries, v, err)
    }

    if hostRecordSuccessMessage != "" {
      log.Infoln(hostRecordSuccessMessage)
    }

  }

  // select any server node
  var node *k3d.Node
  for _, n := range cluster.Nodes {
    if n.Role == k3d.ServerRole {
      node = n
    }
  }

  hostsEntry := fmt.Sprintf("%s %s", ip, name)
  patchCmd := `patch=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s` + name + `$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | busybox xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$patch"`
  successInjectCoreDNSEntry := false

  // try 3 (or K3D_DEBUG_COREDNS_RETRIES value) times, as e.g. on cluster startup it may take some time for the Configmap to be available and the server to be responsive
  for i := 0; i < retries; i++ {
    l.Log().Debugf("Running CoreDNS patch in node %s to add %s (try %d/%d)...", node.Name, hostsEntry, i, retries)
    logreader, err := runtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", patchCmd})
    if err == nil {
      successInjectCoreDNSEntry = true
      break
    } else {
      msg := fmt.Sprintf("(try %d/%d) error patching the CoreDNS ConfigMap to include entry '%s': %+v", i, retries, hostsEntry, err)
      if logreader != nil {
        readlogs, err := io.ReadAll(logreader)
        if err != nil {
          l.Log().Debugf("(try %d/%d) error reading the logs from failed CoreDNS patch exec process in node %s: %v", i, retries, node.Name, err)
        } else {
          msg += fmt.Sprintf("\nLogs: %s", string(readlogs))
        }
      } else {
        l.Log().Debugf("(try %d/%d) error reading the logs from failed CoreDNS patch exec process in node %s: no logreader returned for exec process", i, retries, node.Name)
      }
      l.Log().Debugln(msg)
      time.Sleep(1 * time.Second)
    }
  }
  if !successInjectCoreDNSEntry {
    return fmt.Errorf("failed to patch CoreDNS ConfigMap to include entry '%s' (%d tries, see debug logs)", hostsEntry, retries)
  }
  l.Log().Debugf("Successfully patched CoreDNS Configmap with record '%s'", hostsEntry)
  return nil
}

// prepInjectHostIP adds /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterStartOpts *k3d.ClusterStartOpts) error {
  if cluster.Network.Name == "host" {
    l.Log().Tracef("Not injecting hostIP as clusternetwork is 'host'")
    return nil
  }

  hostIP := clusterStartOpts.EnvironmentInfo.HostGateway
  hostsEntry := fmt.Sprintf("%s %s", hostIP.String(), k3d.DefaultK3dInternalHostRecord)
  l.Log().Infof("Injecting '%s' into /etc/hosts of all nodes...", hostsEntry)

  // entry in /etc/hosts
  errgrp, errgrpctx := errgroup.WithContext(ctx)
  for _, node := range cluster.Nodes {
    n := node
    errgrp.Go(func() error {
      return runtime.ExecInNode(errgrpctx, n, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)})
    })
  }
  if err := errgrp.Wait(); err != nil {
    return fmt.Errorf("failed to add hosts entry %s: %w", hostsEntry, err)
  }
  l.Log().Debugf("Successfully added host record \"%s\" to /etc/hosts in all nodes", hostsEntry)

  return nil
}

func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
@ -1027,12 +1134,12 @@ func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt.
      success = true
      break
    } else {
      log.Debugf("Failed to create LocalRegistryHosting ConfigMap in node %s: %+v", node.Name, err)
      l.Log().Debugf("Failed to create LocalRegistryHosting ConfigMap in node %s: %+v", node.Name, err)
    }
  }
  if success == false {
    log.Warnf("Failed to create LocalRegistryHosting ConfigMap")
    l.Log().Warnf("Failed to create LocalRegistryHosting ConfigMap")
  }
  return nil
}
@ -1069,7 +1176,7 @@ func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, clus
  for _, portWithNodeFilters := range changeset.Ports {
    filteredNodes, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters)
    if err != nil {
      return err
      return fmt.Errorf("failed to filter nodes: %w", err)
    }

    for suffix := range filteredNodes {
@ -1089,12 +1196,12 @@ func ClusterEditChangesetSimple(ctx context.Context, runtime k3drt.Runtime, clus
    }
  }

  log.Debugf("ORIGINAL:\n> Ports: %+v\n> Config: %+v\nCHANGESET:\n> Ports: %+v\n> Config: %+v", existingLB.Node.Ports, existingLB.Config, lbChangeset.Node.Ports, lbChangeset.Config)
  l.Log().Debugf("ORIGINAL:\n> Ports: %+v\n> Config: %+v\nCHANGESET:\n> Ports: %+v\n> Config: %+v", existingLB.Node.Ports, existingLB.Config, lbChangeset.Node.Ports, lbChangeset.Config)

  // prepare to write config to lb container
  configyaml, err := yaml.Marshal(lbChangeset.Config)
  if err != nil {
    return err
    return fmt.Errorf("failed to marshal loadbalancer config changeset: %w", err)
  }
  writeLbConfigAction := k3d.NodeHook{
    Stage: k3d.LifecycleStagePreStart,

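The `corednsAddHost` helper above retries the patch because the ConfigMap and apiserver may not be responsive right after startup, with the retry count overridable via an env var (`K3D_DEBUG_COREDNS_RETRIES` per the comment in the diff). The pattern in isolation, as a sketch with the env var name inlined rather than taken from `k3d.K3dEnvDebugCorednsRetries`:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// withRetries runs op up to a configurable number of times, sleeping
// between attempts to give slow-starting services time to come up.
func withRetries(op func() error) error {
	retries := 3
	if v, ok := os.LookupEnv("K3D_DEBUG_COREDNS_RETRIES"); ok && v != "" {
		r, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("invalid value for K3D_DEBUG_COREDNS_RETRIES (%s): %w", v, err)
		}
		retries = r
	}
	var lastErr error
	for i := 0; i < retries; i++ {
		if lastErr = op(); lastErr == nil {
			return nil
		}
		time.Sleep(1 * time.Second) // back off before the next attempt
	}
	return fmt.Errorf("operation failed after %d tries: %w", retries, lastErr)
}
```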
@ -24,7 +24,7 @@ package client
import (
  "fmt"

  "github.com/rancher/k3d/v4/pkg/types"
  "github.com/rancher/k3d/v5/pkg/types"
)

// CheckName ensures that a cluster name is also a valid host name according to RFC 1123.
62  pkg/client/environment.go  (Normal file)
@ -0,0 +1,62 @@
/*
Copyright © 2020-2021 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package client

import (
  "context"
  "fmt"

  l "github.com/rancher/k3d/v5/pkg/logger"
  "github.com/rancher/k3d/v5/pkg/runtimes"

  k3d "github.com/rancher/k3d/v5/pkg/types"
)

func GatherEnvironmentInfo(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*k3d.EnvironmentInfo, error) {

  envInfo := &k3d.EnvironmentInfo{}

  rtimeInfo, err := runtime.Info()
  if err != nil {
    return nil, err
  }
  envInfo.RuntimeInfo = *rtimeInfo

  l.Log().Infof("Using the k3d-tools node to gather environment information")
  toolsNode, err := EnsureToolsNode(ctx, runtime, cluster)
  if err != nil {
    return nil, err
  }
  defer func() {
    go NodeDelete(ctx, runtime, toolsNode, k3d.NodeDeleteOpts{SkipLBUpdate: true})
  }()

  hostIP, err := GetHostIP(ctx, runtime, cluster)
  if err != nil {
    return envInfo, fmt.Errorf("failed to get host IP: %w", err)
  }

  envInfo.HostGateway = hostIP

  return envInfo, nil

}
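A hypothetical caller of the new `GatherEnvironmentInfo`, mirroring how `ClusterRun` uses it before `ClusterStart` in the diff above. This is a sketch, not k3d code: the choice of the Docker runtime and the bare cluster value are assumptions, and in practice the cluster would already be fully configured:

```go
package example

import (
	"context"
	"fmt"

	"github.com/rancher/k3d/v5/pkg/client"
	"github.com/rancher/k3d/v5/pkg/runtimes"
	k3d "github.com/rancher/k3d/v5/pkg/types"
)

// printHostGateway gathers the environment info for a cluster and prints
// the host gateway IP that gets injected as host.k3d.internal.
func printHostGateway(ctx context.Context, cluster *k3d.Cluster) error {
	envInfo, err := client.GatherEnvironmentInfo(ctx, runtimes.Docker, cluster)
	if err != nil {
		return fmt.Errorf("failed to gather environment info: %w", err)
	}
	fmt.Printf("host gateway IP: %s\n", envInfo.HostGateway.String())
	return nil
}
```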
@ -25,28 +25,28 @@ import (
  "os"
  "strconv"

  "github.com/rancher/k3d/v4/pkg/runtimes"
  "github.com/rancher/k3d/v4/pkg/types/fixes"
  log "github.com/sirupsen/logrus"
  l "github.com/rancher/k3d/v5/pkg/logger"
  "github.com/rancher/k3d/v5/pkg/runtimes"
  "github.com/rancher/k3d/v5/pkg/types/fixes"
)

// FIXME: FixCgroupV2 - to be removed when fixed upstream
func EnableCgroupV2FixIfNeeded(runtime runtimes.Runtime) {
  if _, isSet := os.LookupEnv(fixes.EnvFixCgroupV2); !isSet {
  if _, isSet := os.LookupEnv(string(fixes.EnvFixCgroupV2)); !isSet {
    runtimeInfo, err := runtime.Info()
    if err != nil {
      log.Warnf("Failed to get runtime information: %+v", err)
      l.Log().Warnf("Failed to get runtime information: %+v", err)
      return
    }
    cgroupVersion, err := strconv.Atoi(runtimeInfo.CgroupVersion)
    if err != nil {
      log.Debugf("Failed to parse cgroupVersion: %+v", err)
      l.Log().Debugf("Failed to parse cgroupVersion: %+v", err)
      return
    }
    if cgroupVersion == 2 {
      log.Debugf("Detected CgroupV2, enabling custom entrypoint (disable by setting %s=false)", fixes.EnvFixCgroupV2)
      if err := os.Setenv(fixes.EnvFixCgroupV2, "true"); err != nil {
        log.Errorf("Detected CgroupsV2 but failed to enable k3d's hotfix (try `export %s=true`): %+v", fixes.EnvFixCgroupV2, err)
      l.Log().Debugf("Detected CgroupV2, enabling custom entrypoint (disable by setting %s=false)", fixes.EnvFixCgroupV2)
      if err := os.Setenv(string(fixes.EnvFixCgroupV2), "true"); err != nil {
        l.Log().Errorf("Detected CgroupsV2 but failed to enable k3d's hotfix (try `export %s=true`): %+v", fixes.EnvFixCgroupV2, err)
      }
    }
  }
@ -27,45 +27,74 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"regexp"
|
||||
"runtime"
|
||||
goruntime "runtime"
|
||||
"strings"
|
||||
|
||||
rt "github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
"github.com/rancher/k3d/v4/pkg/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/rancher/k3d/v5/pkg/util"
|
||||
)
|
||||
|
||||
var nsLookupAddressRegexp = regexp.MustCompile(`^Address:\s+(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$`)
|
||||
type ResolveHostCmd struct {
|
||||
Cmd string
|
||||
LogMatcher *regexp.Regexp
|
||||
}
|
||||
|
||||
var (
|
||||
ResolveHostCmdNSLookup = ResolveHostCmd{
|
||||
Cmd: "nslookup %s",
|
||||
LogMatcher: regexp.MustCompile(`^Address:\s+(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$`),
|
||||
}
|
||||
|
||||
ResolveHostCmdGetEnt = ResolveHostCmd{
|
||||
Cmd: "getent ahostsv4 '%s'",
|
||||
LogMatcher: regexp.MustCompile(`(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+STREAM.+`), // e.g. `192.168.47.4 STREAM host.docker.internal`,
|
||||
}
|
||||
)
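Each `ResolveHostCmd` pairs a shell command with a regex whose named `ip` capture extracts the address from the command's output. A runnable sketch of the extraction step, with a tiny local stand-in for k3d's `util.MapSubexpNames` helper:

```go
package main

import (
	"fmt"
	"regexp"
)

// mapSubexpNames is a local stand-in for util.MapSubexpNames: it zips the
// regexp's subexpression names with the captured values.
func mapSubexpNames(names, matches []string) map[string]string {
	m := map[string]string{}
	for i, name := range names {
		if name != "" && i < len(matches) {
			m[name] = matches[i]
		}
	}
	return m
}

func main() {
	// The getent matcher from the hunk above, e.g. `192.168.47.4 STREAM host.docker.internal`.
	getentMatcher := regexp.MustCompile(`(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+STREAM.+`)

	line := "192.168.47.4    STREAM host.docker.internal"
	match := getentMatcher.FindStringSubmatch(line)
	if match == nil {
		fmt.Println("no match")
		return
	}
	submatches := mapSubexpNames(getentMatcher.SubexpNames(), match)
	fmt.Println("ip =", submatches["ip"]) // ip = 192.168.47.4
}
```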
// GetHostIP returns the routable IP address to be able to access services running on the host system from inside the cluster.
// This depends on the Operating System and the chosen Runtime.
func GetHostIP(ctx context.Context, rtime rt.Runtime, cluster *k3d.Cluster) (net.IP, error) {
func GetHostIP(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (net.IP, error) {

rtimeInfo, err := runtime.Info()
if err != nil {
return nil, err
}

l.Log().Tracef("GOOS: %s / Runtime OS: %s (%s)", goruntime.GOOS, rtimeInfo.OSType, rtimeInfo.OS)

isDockerDesktop := func(os string) bool {
return strings.ToLower(os) == "docker desktop"
}

// Docker Runtime
if rtime == rt.Docker {

log.Tracef("Runtime GOOS: %s", runtime.GOOS)

// "native" Docker on Linux
if runtime.GOOS == "linux" {
ip, err := rtime.GetHostIP(ctx, cluster.Network.Name)
if err != nil {
return nil, err
}
return ip, nil
}
if runtime == runtimes.Docker {

// Docker (for Desktop) on MacOS or Windows
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
ip, err := resolveHostnameFromInside(ctx, rtime, cluster.Nodes[0], "host.docker.internal")
if isDockerDesktop(rtimeInfo.OS) {

toolsNode, err := EnsureToolsNode(ctx, runtime, cluster)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to ensure that k3d-tools node is running to get host IP: %w", err)
}
return ip, nil

ip, err := resolveHostnameFromInside(ctx, runtime, toolsNode, "host.docker.internal", ResolveHostCmdGetEnt)
if err == nil {
return ip, nil
}

l.Log().Warnf("failed to resolve 'host.docker.internal' from inside the k3d-tools node: %v", err)

}

// Catch all other GOOS cases
return nil, fmt.Errorf("GetHostIP only implemented for Linux, MacOS (Darwin) and Windows")
l.Log().Infof("HostIP: using network gateway...")
ip, err := runtime.GetHostIP(ctx, cluster.Network.Name)
if err != nil {
return nil, fmt.Errorf("runtime failed to get host IP: %w", err)
}

return ip, nil

}
@ -74,9 +103,9 @@ func GetHostIP(ctx context.Context, rtime rt.Runtime, cluster *k3d.Cluster) (net

}

func resolveHostnameFromInside(ctx context.Context, rtime rt.Runtime, node *k3d.Node, hostname string) (net.IP, error) {
func resolveHostnameFromInside(ctx context.Context, rtime runtimes.Runtime, node *k3d.Node, hostname string, cmd ResolveHostCmd) (net.IP, error) {

logreader, execErr := rtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", fmt.Sprintf("nslookup %s", hostname)})
logreader, execErr := rtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", fmt.Sprintf(cmd.Cmd, hostname)})

if logreader == nil {
if execErr != nil {
@ -94,28 +123,28 @@ func resolveHostnameFromInside(ctx context.Context, rtime rt.Runtime, node *k3d.
return nil, fmt.Errorf("Failed to scan logs for host IP: Could not create scanner from logreader")
}
if scanner != nil && execErr != nil {
log.Debugln("Exec Process Failed, but we still got logs, so we're at least trying to get the IP from there...")
log.Tracef("-> Exec Process Error was: %+v", execErr)
l.Log().Debugln("Exec Process Failed, but we still got logs, so we're at least trying to get the IP from there...")
l.Log().Tracef("-> Exec Process Error was: %+v", execErr)
}
for scanner.Scan() {
log.Tracef("Scanning Log Line '%s'", scanner.Text())
match := nsLookupAddressRegexp.FindStringSubmatch(scanner.Text())
l.Log().Tracef("Scanning Log Line '%s'", scanner.Text())
match := cmd.LogMatcher.FindStringSubmatch(scanner.Text())
if len(match) == 0 {
continue
}
log.Tracef("-> Match(es): '%+v'", match)
submatches = util.MapSubexpNames(nsLookupAddressRegexp.SubexpNames(), match)
log.Tracef(" -> Submatch(es): %+v", submatches)
l.Log().Tracef("-> Match(es): '%+v'", match)
submatches = util.MapSubexpNames(cmd.LogMatcher.SubexpNames(), match)
l.Log().Tracef(" -> Submatch(es): %+v", submatches)
break
}
if _, ok := submatches["ip"]; !ok {
if execErr != nil {
log.Errorln(execErr)
l.Log().Errorln(execErr)
}
return nil, fmt.Errorf("Failed to read address for '%s' from nslookup response", hostname)
return nil, fmt.Errorf("Failed to read address for '%s' from command output", hostname)
}

log.Debugf("Hostname '%s' -> Address '%s'", hostname, submatches["ip"])
l.Log().Debugf("Hostname '%s' -> Address '%s'", hostname, submatches["ip"])

return net.ParseIP(submatches["ip"]), nil
@ -23,18 +23,20 @@ package client

import (
"context"
"fmt"

k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v5/pkg/logger"
k3drt "github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"inet.af/netaddr"
)

// GetIP checks a given network for a free IP and returns it, if possible
func GetIP(ctx context.Context, runtime k3drt.Runtime, network *k3d.ClusterNetwork) (netaddr.IP, error) {

network, err := runtime.GetNetwork(ctx, network)
if err != nil {
return netaddr.IP{}, err
return netaddr.IP{}, fmt.Errorf("runtime failed to get network '%s': %w", network.Name, err)
}

var ipsetbuilder netaddr.IPSetBuilder
@ -46,14 +48,17 @@ func GetIP(ctx context.Context, runtime k3drt.Runtime, network *k3d.ClusterNetwo
}

// exclude first and last address
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().From)
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().To)
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().From())
ipsetbuilder.Remove(network.IPAM.IPPrefix.Range().To())

ipset := ipsetbuilder.IPSet()
ipset, err := ipsetbuilder.IPSet()
if err != nil {
return netaddr.IP{}, err
}

ip := ipset.Ranges()[0].From
ip := ipset.Ranges()[0].From()

log.Debugf("Found free IP %s in network %s", ip.String(), network.Name)
l.Log().Debugf("Found free IP %s in network %s", ip.String(), network.Name)

return ip, nil
}
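The API churn in this hunk (`From` becoming `From()`, `IPSet()` gaining an error return) reflects a newer `inet.af/netaddr` release. A small, self-contained sketch of the free-IP search with that newer API; the network prefix and the already-used addresses are demo assumptions:

```go
package main

import (
	"fmt"

	"inet.af/netaddr"
)

func main() {
	// Assume the cluster network is 172.18.0.0/24 for this demo.
	prefix := netaddr.MustParseIPPrefix("172.18.0.0/24")

	var b netaddr.IPSetBuilder
	b.AddPrefix(prefix)

	// Exclude network and broadcast addresses, as the hunk above does.
	b.Remove(prefix.Range().From())
	b.Remove(prefix.Range().To())

	// Exclude addresses assumed to be in use (gateway and one node).
	b.Remove(netaddr.MustParseIP("172.18.0.1"))
	b.Remove(netaddr.MustParseIP("172.18.0.2"))

	// IPSet() returns an error in newer netaddr versions -- the reason the
	// hunk switches from `ipset := ...` to `ipset, err := ...`.
	ipset, err := b.IPSet()
	if err != nil {
		panic(err)
	}

	// The first remaining range starts at the lowest free address.
	fmt.Println("first free IP:", ipset.Ranges()[0].From()) // 172.18.0.3
}
```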
@ -25,14 +25,14 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"os"
"path/filepath"
"time"

"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@ -53,14 +53,14 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *
// get kubeconfig from cluster node
kubeconfig, err := KubeconfigGet(ctx, runtime, cluster)
if err != nil {
return output, err
return output, fmt.Errorf("failed to get kubeconfig for cluster '%s': %w", cluster.Name, err)
}

// empty output parameter = write to default
if output == "" {
output, err = KubeconfigGetDefaultPath()
if err != nil {
return output, err
return output, fmt.Errorf("failed to get default kubeconfig path: %w", err)
}
}

@ -78,19 +78,17 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *

// the output file does not exist: try to create it and try again
if os.IsNotExist(err) && firstRun {
log.Debugf("Output path '%s' doesn't exist, trying to create it...", output)
l.Log().Debugf("Output path '%s' doesn't exist, trying to create it...", output)

// create directory path
if err := os.MkdirAll(filepath.Dir(output), 0755); err != nil {
log.Errorf("Failed to create output directory '%s'", filepath.Dir(output))
return output, err
return output, fmt.Errorf("failed to create output directory '%s': %w", filepath.Dir(output), err)
}

// try create output file
f, err := os.Create(output)
if err != nil {
log.Errorf("Failed to create output file '%s'", output)
return output, err
return output, fmt.Errorf("failed to create output file '%s': %w", output, err)
}
f.Close()

@ -98,8 +96,7 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *
firstRun = false
continue
}
log.Errorf("Failed to open output file '%s' or it's not a KubeConfig", output)
return output, err
return output, fmt.Errorf("failed to open output file '%s' or it's not a kubeconfig: %w", output, err)
}
break
}
@ -117,11 +114,10 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// TODO: getKubeconfig: we should make sure, that the server node we're trying to fetch from is actually running
serverNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.ServerRole)})
if err != nil {
log.Errorln("Failed to get server nodes")
return nil, err
return nil, fmt.Errorf("runtime failed to get server nodes for cluster '%s': %w", cluster.Name, err)
}
if len(serverNodes) == 0 {
return nil, fmt.Errorf("Didn't find any server node")
return nil, fmt.Errorf("didn't find any server node for cluster '%s'", cluster.Name)
}

// prefer a server node, which actually has the port exposed
@ -147,15 +143,13 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// get the kubeconfig from the first server node
reader, err := runtime.GetKubeconfig(ctx, chosenServer)
if err != nil {
log.Errorf("Failed to get kubeconfig from node '%s'", chosenServer.Name)
return nil, err
return nil, fmt.Errorf("runtime failed to pull kubeconfig from node '%s': %w", chosenServer.Name, err)
}
defer reader.Close()

readBytes, err := ioutil.ReadAll(reader)
readBytes, err := io.ReadAll(reader)
if err != nil {
log.Errorln("Couldn't read kubeconfig file")
return nil, err
return nil, fmt.Errorf("failed to read kubeconfig file: %w", err)
}

// drop the first 512 bytes which contain file metadata/control characters
@ -167,8 +161,7 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
*/
kc, err := clientcmd.Load(trimBytes)
if err != nil {
log.Errorln("Failed to parse the KubeConfig")
return nil, err
return nil, fmt.Errorf("failed to parse kubeconfig: %w", err)
}

// update the server URL
@ -196,7 +189,7 @@ func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
// set current-context to new context name
kc.CurrentContext = newContextName

log.Tracef("Modified Kubeconfig: %+v", kc)
l.Log().Tracef("Modified Kubeconfig: %+v", kc)

return kc, nil
}
@ -212,25 +205,22 @@ func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,
} else {
output, err = os.Create(path)
if err != nil {
log.Errorf("Failed to create file '%s'", path)
return err
return fmt.Errorf("failed to create file '%s': %w", path, err)
}
defer output.Close()
}

kubeconfigBytes, err := clientcmd.Write(*kubeconfig)
if err != nil {
log.Errorln("Failed to write KubeConfig")
return err
return fmt.Errorf("failed to write kubeconfig: %w", err)
}

_, err = output.Write(kubeconfigBytes)
if err != nil {
log.Errorf("Failed to write to file '%s'", output.Name())
return err
return fmt.Errorf("failed to write file '%s': %w", output.Name(), err)
}

log.Debugf("Wrote kubeconfig to '%s'", output.Name())
l.Log().Debugf("Wrote kubeconfig to '%s'", output.Name())

return nil

@ -239,7 +229,7 @@ func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,
// KubeconfigMerge merges a new kubeconfig into an existing kubeconfig and returns the result
func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {

log.Tracef("Merging new Kubeconfig:\n%+v\n>>> into existing Kubeconfig:\n%+v", newKubeConfig, existingKubeConfig)
l.Log().Tracef("Merging new Kubeconfig:\n%+v\n>>> into existing Kubeconfig:\n%+v", newKubeConfig, existingKubeConfig)

// Overwrite values in existing kubeconfig
for k, v := range newKubeConfig.Clusters {
@ -274,7 +264,7 @@ func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, ex
updateCurrentContext = true
}
if updateCurrentContext {
log.Debugf("Setting new current-context '%s'", newKubeConfig.CurrentContext)
l.Log().Debugf("Setting new current-context '%s'", newKubeConfig.CurrentContext)
existingKubeConfig.CurrentContext = newKubeConfig.CurrentContext
}

@ -285,17 +275,15 @@ func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, ex
func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
tempPath := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
if err := clientcmd.WriteToFile(*kubeconfig, tempPath); err != nil {
log.Errorf("Failed to write merged kubeconfig to temporary file '%s'", tempPath)
return err
return fmt.Errorf("failed to write merged kubeconfig to temporary file '%s': %w", tempPath, err)
}

// Move temporary file over existing KubeConfig
if err := os.Rename(tempPath, path); err != nil {
log.Errorf("Failed to overwrite existing KubeConfig '%s' with new KubeConfig '%s'", path, tempPath)
return err
return fmt.Errorf("failed to overwrite existing KubeConfig '%s' with new kubeconfig '%s': %w", path, tempPath, err)
}

log.Debugf("Wrote kubeconfig to '%s'", path)
l.Log().Debugf("Wrote kubeconfig to '%s'", path)

return nil
}
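`KubeconfigWrite` uses the classic temp-file-plus-rename dance so readers never observe a half-written kubeconfig. A minimal, standard-library-only sketch of the same idea (file names and modes are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// writeFileAtomic writes data to a unique temp file next to the target and
// renames it into place, so readers never see a partially written config.
func writeFileAtomic(path string, data []byte) error {
	tempPath := fmt.Sprintf("%s.tmp_%s", path, time.Now().Format("20060102_150405.000000"))
	if err := os.WriteFile(tempPath, data, 0600); err != nil {
		return fmt.Errorf("failed to write temporary file '%s': %w", tempPath, err)
	}
	// os.Rename is atomic on POSIX filesystems when source and target live
	// on the same volume -- which holds here, since both share a directory.
	if err := os.Rename(tempPath, path); err != nil {
		os.Remove(tempPath) // best-effort cleanup
		return fmt.Errorf("failed to move '%s' over '%s': %w", tempPath, path, err)
	}
	return nil
}

func main() {
	target := filepath.Join(os.TempDir(), "kubeconfig-demo.yaml")
	if err := writeFileAtomic(target, []byte("apiVersion: v1\nkind: Config\n")); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("wrote", target)
}
```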
@ -304,9 +292,9 @@ func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path
func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
path, err := KubeconfigGetDefaultPath()
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get default kubeconfig path: %w", err)
}
log.Debugf("Using default kubeconfig '%s'", path)
l.Log().Debugf("Using default kubeconfig '%s'", path)
return clientcmd.LoadFromFile(path)
}

@ -314,7 +302,7 @@ func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
func KubeconfigGetDefaultPath() (string, error) {
defaultKubeConfigLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
if len(defaultKubeConfigLoadingRules.GetLoadingPrecedence()) > 1 {
return "", fmt.Errorf("Multiple kubeconfigs specified via KUBECONFIG env var: Please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
return "", fmt.Errorf("multiple kubeconfigs specified via KUBECONFIG env var: Please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
}
return defaultKubeConfigLoadingRules.GetDefaultFilename(), nil
}
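client-go's `clientcmd` loading rules implement the real policy here; the sketch below is only a stdlib approximation of the rule being enforced — refuse to pick one file implicitly when `KUBECONFIG` lists several:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// defaultKubeconfigPath mimics the rule enforced above: if KUBECONFIG lists
// more than one file, don't choose one silently.
func defaultKubeconfigPath() (string, error) {
	if env := os.Getenv("KUBECONFIG"); env != "" {
		paths := strings.Split(env, string(os.PathListSeparator))
		if len(paths) > 1 {
			return "", fmt.Errorf("multiple kubeconfigs specified via KUBECONFIG env var: please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
		}
		return paths[0], nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(home, ".kube", "config"), nil
}

func main() {
	path, err := defaultKubeconfigPath()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("default kubeconfig:", path)
}
```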
@ -323,11 +311,11 @@ func KubeconfigGetDefaultPath() (string, error) {
func KubeconfigRemoveClusterFromDefaultConfig(ctx context.Context, cluster *k3d.Cluster) error {
defaultKubeConfigPath, err := KubeconfigGetDefaultPath()
if err != nil {
return err
return fmt.Errorf("failed to get default kubeconfig path: %w", err)
}
kubeconfig, err := KubeconfigGetDefaultFile()
if err != nil {
return err
return fmt.Errorf("failed to get default kubeconfig file: %w", err)
}
kubeconfig = KubeconfigRemoveCluster(ctx, cluster, kubeconfig)
return KubeconfigWrite(ctx, kubeconfig, defaultKubeConfigPath)

@ -26,15 +26,18 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"strings"
"time"

"github.com/docker/go-connections/nat"
"github.com/go-test/deep"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/imdario/mergo"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)

@ -51,8 +54,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
// update cluster details to ensure that we have the latest node list
cluster, err = ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to update details for cluster '%s'", cluster.Name)
return err
return fmt.Errorf("failed to update details for cluster '%s': %w", cluster.Name, err)
}

currentConfig, err := GetLoadbalancerConfig(ctx, runtime, cluster)
@ -60,23 +62,23 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
return fmt.Errorf("error getting current config from loadbalancer: %w", err)
}

log.Tracef("Current loadbalancer config:\n%+v", currentConfig)
l.Log().Tracef("Current loadbalancer config:\n%+v", currentConfig)

newLBConfig, err := LoadbalancerGenerateConfig(cluster)
if err != nil {
return fmt.Errorf("error generating new loadbalancer config: %w", err)
}
log.Tracef("New loadbalancer config:\n%+v", currentConfig)
l.Log().Tracef("New loadbalancer config:\n%+v", currentConfig)

if diff := deep.Equal(currentConfig, newLBConfig); diff != nil {
log.Debugf("Updating the loadbalancer with this diff: %+v", diff)
l.Log().Debugf("Updating the loadbalancer with this diff: %+v", diff)
}

newLbConfigYaml, err := yaml.Marshal(&newLBConfig)
if err != nil {
return fmt.Errorf("error marshalling the new loadbalancer config: %w", err)
}
log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
l.Log().Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
startTime := time.Now().Truncate(time.Second).UTC()
if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer.Node); err != nil {
return fmt.Errorf("error writing new loadbalancer config to container: %w", err)
@ -84,25 +86,25 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu

successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer successCtxCancel()
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.GetReadyLogMessage(cluster.ServerLoadBalancer.Node, k3d.IntentAny), startTime)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer failureCtxCancel()
err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime)
if err != nil {
log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
l.Log().Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
return ErrLBConfigFailedTest
} else {
log.Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.")
l.Log().Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.")
return ErrLBConfigHostNotFound
}
} else {
log.Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err)
l.Log().Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err)
return ErrLBConfigFailedTest
}
}
log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)
l.Log().Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)

time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits
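The verification above is a two-phase wait: first look for the loadbalancer's ready message within a short deadline, and only on timeout look for a known failure marker to distinguish "broken config" from "just slow". A minimal sketch of that pattern, stdlib only (the log markers and timeouts here are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"
)

// waitForLog is a stand-in for NodeWaitForLogMessage: it polls a log source
// until the message appears or the context deadline fires.
func waitForLog(ctx context.Context, logs func() string, message string) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if strings.Contains(logs(), message) {
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}
}

// checkReload waits briefly for the success marker; on deadline it waits
// again for a known failure marker, mirroring the success/failure contexts above.
func checkReload(ctx context.Context, logs func() string) error {
	successCtx, cancel := context.WithDeadline(ctx, time.Now().Add(1*time.Second))
	defer cancel()
	if err := waitForLog(successCtx, logs, "start worker process"); err == nil {
		return nil
	} else if !errors.Is(err, context.DeadlineExceeded) {
		return err
	}

	failureCtx, cancel2 := context.WithDeadline(ctx, time.Now().Add(1*time.Second))
	defer cancel2()
	if err := waitForLog(failureCtx, logs, "host not found in upstream"); err == nil {
		return fmt.Errorf("loadbalancer config references a node that is down")
	}
	return fmt.Errorf("could not verify loadbalancer state, check it manually")
}

func main() {
	logs := func() string { return "... start worker process ..." }
	fmt.Println(checkReload(context.Background(), logs)) // <nil>
}
```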
@ -120,7 +122,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
var err error
cluster.ServerLoadBalancer.Node, err = NodeGet(ctx, runtime, node)
if err != nil {
return cfg, err
return cfg, fmt.Errorf("failed to get loadbalancer node '%s': %w", node.Name, err)
}
}
}
@ -128,13 +130,13 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste

reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node)
if err != nil {
return cfg, err
return cfg, fmt.Errorf("runtime failed to read loadbalancer config '%s' from node '%s': %w", types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node.Name, err)
}
defer reader.Close()

file, err := ioutil.ReadAll(reader)
file, err := io.ReadAll(reader)
if err != nil {
return cfg, err
return cfg, fmt.Errorf("failed to read loadbalancer config file: %w", err)
}

file = bytes.Trim(file[512:], "\x00") // trim control characters, etc.
@ -170,7 +172,7 @@ func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, e
}

// some additional nginx settings
lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers)
lbConfig.Settings.WorkerConnections = k3d.DefaultLoadbalancerWorkerConnections + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers)

return lbConfig, nil
}
@ -193,6 +195,22 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster
}
}

if opts != nil && opts.ConfigOverrides != nil && len(opts.ConfigOverrides) > 0 {
tmpViper := viper.New()
for _, override := range opts.ConfigOverrides {
kv := strings.SplitN(override, "=", 2)
l.Log().Tracef("Overriding LB config with %s...", kv)
tmpViper.Set(kv[0], kv[1])
}
lbConfigOverride := &k3d.LoadbalancerConfig{}
if err := tmpViper.Unmarshal(lbConfigOverride); err != nil {
return nil, fmt.Errorf("failed to unmarshal loadbalancer config override into loadbalancer config: %w", err)
}
if err := mergo.MergeWithOverwrite(cluster.ServerLoadBalancer.Config, lbConfigOverride); err != nil {
return nil, fmt.Errorf("failed to override loadbalancer config: %w", err)
}
}
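The override block above routes `key=value` flags through a scratch viper instance (dotted keys become nested paths) and then merges the resulting struct over the existing config with mergo. A self-contained sketch under simplified types; the struct and override key are demo assumptions:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/imdario/mergo"
	"github.com/spf13/viper"
)

// lbSettings / lbConfig are simplified stand-ins for k3d's LoadbalancerConfig.
type lbSettings struct {
	WorkerProcesses   int `mapstructure:"workerprocesses"`
	WorkerConnections int `mapstructure:"workerconnections"`
}

type lbConfig struct {
	Settings lbSettings `mapstructure:"settings"`
}

func main() {
	current := &lbConfig{Settings: lbSettings{WorkerProcesses: 2, WorkerConnections: 1024}}

	overrides := []string{"settings.workerprocesses=8"}

	// Collect key=value overrides in a scratch viper instance; viper's
	// weakly-typed decoding turns the string "8" into an int on Unmarshal.
	tmp := viper.New()
	for _, override := range overrides {
		kv := strings.SplitN(override, "=", 2)
		tmp.Set(kv[0], kv[1])
	}

	patch := &lbConfig{}
	if err := tmp.Unmarshal(patch); err != nil {
		panic(err)
	}

	// Merge the patch over the current config; zero values in the patch
	// (like WorkerConnections here) leave the existing values untouched.
	if err := mergo.MergeWithOverwrite(current, patch); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", *current) // {Settings:{WorkerProcesses:8 WorkerConnections:1024}}
}
```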
// Create LB as a modified node with loadbalancerRole
lbNode := &k3d.Node{
Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
@ -213,7 +231,7 @@ func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, portmapping nat.
nodenames := []string{}
for _, node := range targetNodes {
if node.Role == k3d.LoadBalancerRole {
return fmt.Errorf("error adding port config to loadbalancer: cannot add port config referencing the loadbalancer itself (loop)")
return fmt.Errorf("cannot add port config referencing the loadbalancer itself (loop)")
}
nodenames = append(nodenames, node.Name)
}

@ -23,13 +23,15 @@ THE SOFTWARE.
package client

import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"reflect"
"strconv"
"strings"
"time"

@ -39,14 +41,17 @@ import (
"github.com/docker/go-connections/nat"
dockerunits "github.com/docker/go-units"
"github.com/imdario/mergo"
"github.com/rancher/k3d/v4/pkg/actions"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
runtimeErrors "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/fixes"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"github.com/rancher/k3d/v5/pkg/actions"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/runtimes/docker"
runtimeTypes "github.com/rancher/k3d/v5/pkg/runtimes/types"

runtimeErrors "github.com/rancher/k3d/v5/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/types/fixes"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/pkg/util"
"golang.org/x/sync/errgroup"
)

@ -55,12 +60,15 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
targetClusterName := cluster.Name
cluster, err := ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find specified cluster '%s'", targetClusterName)
return err
return fmt.Errorf("Failed to find specified cluster '%s': %w", targetClusterName, err)
}

// network
node.Networks = []string{cluster.Network.Name}
// networks: ensure that cluster network is on index 0
networks := []string{cluster.Network.Name}
if node.Networks != nil {
networks = append(networks, node.Networks...)
}
node.Networks = networks

// skeleton
if node.RuntimeLabels == nil {
@ -80,7 +88,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
}
// if we didn't find a node with the same role in the cluster, just choose any other node
if srcNode == nil {
log.Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
l.Log().Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
node.Cmd = k3d.DefaultRoleCmds[node.Role]
for _, existingNode := range cluster.Nodes {
if existingNode.Role != k3d.LoadBalancerRole { // any role except for the LoadBalancer role
@ -105,7 +113,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
for _, forbiddenMount := range util.DoNotCopyVolumeSuffices {
for i, mount := range node.Volumes {
if strings.Contains(mount, forbiddenMount) {
log.Tracef("Dropping copied volume mount %s to avoid issues...", mount)
l.Log().Tracef("Dropping copied volume mount %s to avoid issues...", mount)
node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i)
}
}
@ -120,37 +128,37 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
for i, cmd := range srcNode.Cmd {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if cmd == forbiddenCmd {
log.Tracef("Dropping '%s' from source node's cmd", forbiddenCmd)
l.Log().Tracef("Dropping '%s' from source node's cmd", forbiddenCmd)
srcNode.Cmd = append(srcNode.Cmd[:i], srcNode.Cmd[i+1:]...)
}
}
for i, arg := range node.Args {
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
if arg == forbiddenCmd {
log.Tracef("Dropping '%s' from source node's args", forbiddenCmd)
l.Log().Tracef("Dropping '%s' from source node's args", forbiddenCmd)
srcNode.Args = append(srcNode.Args[:i], srcNode.Args[i+1:]...)
}
}
}
}

log.Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name)
log.Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node)
l.Log().Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name)
l.Log().Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node)

// fetch registry config
registryConfigBytes := []byte{}
registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, srcNode)
if err != nil {
if !errors.Is(err, runtimeErrors.ErrRuntimeFileNotFound) {
log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
}
} else {
defer registryConfigReader.Close()

var err error
registryConfigBytes, err = ioutil.ReadAll(registryConfigReader)
registryConfigBytes, err = io.ReadAll(registryConfigReader)
if err != nil {
log.Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
}
registryConfigReader.Close()
registryConfigBytes = bytes.Trim(registryConfigBytes[512:], "\x00") // trim control characters, etc.
@ -158,28 +166,35 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N

// merge node config of new node into existing node config
if err := mergo.MergeWithOverwrite(srcNode, *node); err != nil {
log.Errorln("Failed to merge new node config into existing node config")
return err
return fmt.Errorf("failed to merge new node config into existing node config: %w", err)
}

node = srcNode

log.Debugf("Resulting node %+v", node)
l.Log().Tracef("Resulting node %+v", node)

k3sURLFound := false
for _, envVar := range node.Env {
if strings.HasPrefix(envVar, "K3S_URL") {
k3sURLFound = true
break
k3sURLEnvFound := false
k3sTokenEnvFoundIndex := -1
for index, envVar := range node.Env {
if strings.HasPrefix(envVar, k3s.EnvClusterConnectURL) {
k3sURLEnvFound = true
}
if strings.HasPrefix(envVar, k3s.EnvClusterToken) {
k3sTokenEnvFoundIndex = index
}
}
if !k3sURLFound {
if !k3sURLEnvFound {
if url, ok := node.RuntimeLabels[k3d.LabelClusterURL]; ok {
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", url))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, url))
} else {
log.Warnln("Failed to find K3S_URL value!")
l.Log().Warnln("Failed to find K3S_URL value!")
}
}
if k3sTokenEnvFoundIndex != -1 && createNodeOpts.ClusterToken != "" {
l.Log().Debugln("Overriding copied cluster token with value from nodeCreateOpts...")
node.Env[k3sTokenEnvFoundIndex] = fmt.Sprintf("%s=%s", k3s.EnvClusterToken, createNodeOpts.ClusterToken)
node.RuntimeLabels[k3d.LabelClusterToken] = createNodeOpts.ClusterToken
}

// add node actions
if len(registryConfigBytes) != 0 {
@ -202,12 +217,12 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
node.State.Status = ""

if err := NodeRun(ctx, runtime, node, createNodeOpts); err != nil {
return err
return fmt.Errorf("failed to run node '%s': %w", node.Name, err)
}

// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
log.Infoln("Updating loadbalancer config to include new server node(s)")
l.Log().Infoln("Updating loadbalancer config to include new server node(s)")
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
if !errors.Is(err, ErrLBConfigHostNotFound) {
return fmt.Errorf("error updating loadbalancer: %w", err)
@ -218,6 +233,33 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
return nil
}

func NodeAddToClusterRemote(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, clusterRef string, createNodeOpts k3d.NodeCreateOpts) error {
// runtime labels
if node.RuntimeLabels == nil {
node.RuntimeLabels = map[string]string{}
}

node.FillRuntimeLabels()

node.RuntimeLabels[k3d.LabelClusterName] = clusterRef
node.RuntimeLabels[k3d.LabelClusterURL] = clusterRef
node.RuntimeLabels[k3d.LabelClusterExternal] = "true"
node.RuntimeLabels[k3d.LabelClusterToken] = createNodeOpts.ClusterToken

if node.Env == nil {
node.Env = []string{}
}

node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, clusterRef))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterToken, createNodeOpts.ClusterToken))

if err := NodeRun(ctx, runtime, node, createNodeOpts); err != nil {
return fmt.Errorf("failed to run node '%s': %w", node.Name, err)
}

return nil
}

// NodeAddToClusterMulti adds multiple nodes to a chosen cluster
func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
if createNodeOpts.Timeout > 0*time.Second {
@ -234,7 +276,28 @@ func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes
})
}
if err := nodeWaitGroup.Wait(); err != nil {
return fmt.Errorf("Failed to add one or more nodes: %w", err)
return fmt.Errorf("failed to add one or more nodes: %w", err)
}

return nil
}

func NodeAddToClusterMultiRemote(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, clusterRef string, createNodeOpts k3d.NodeCreateOpts) error {
if createNodeOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
defer cancel()
}

nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
currentNode := node
nodeWaitGroup.Go(func() error {
return NodeAddToClusterRemote(ctx, runtime, currentNode, clusterRef, createNodeOpts)
})
}
if err := nodeWaitGroup.Wait(); err != nil {
return fmt.Errorf("failed to add one or more nodes: %w", err)
}

return nil
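The `Multi` helpers above fan out one goroutine per node via `errgroup` with a shared, optionally time-limited context; the first failure cancels the rest. A runnable, stdlib-plus-errgroup sketch of that structure (node names and the simulated work are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func addNode(ctx context.Context, name string) error {
	select {
	case <-time.After(100 * time.Millisecond): // simulate the work of joining a node
		fmt.Println("added", name)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// errgroup runs one goroutine per node and cancels the shared context
	// as soon as any of them fails, just like the Multi helpers above.
	g, ctx := errgroup.WithContext(ctx)
	for _, name := range []string{"agent-0", "agent-1", "agent-2"} {
		name := name // capture the loop variable, as the hunk does with currentNode
		g.Go(func() error {
			return addNode(ctx, name)
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("failed to add one or more nodes:", err)
		return
	}
	fmt.Println("all nodes added")
}
```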
@ -251,26 +314,24 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Error(err)
l.Log().Error(err)
}
if createNodeOpts.Wait {
currentNode := node
nodeWaitGroup.Go(func() error {
log.Debugf("Starting to wait for node '%s'", currentNode.Name)
readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role]
l.Log().Debugf("Starting to wait for node '%s'", currentNode.Name)
readyLogMessage := k3d.GetReadyLogMessage(currentNode, k3d.IntentNodeCreate)
if readyLogMessage != "" {
return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{})
}
log.Warnf("NodeCreateMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
l.Log().Warnf("NodeCreateMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
return nil
})
}
}

if err := nodeWaitGroup.Wait(); err != nil {
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorf(">>> %+v", err)
return fmt.Errorf("Failed to create nodes")
return fmt.Errorf("failed to create nodes: %w", err)
}

return nil
@ -279,33 +340,145 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
// NodeRun creates and starts a node
func NodeRun(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeCreateOpts k3d.NodeCreateOpts) error {
if err := NodeCreate(ctx, runtime, node, nodeCreateOpts); err != nil {
return err
return fmt.Errorf("failed to create node '%s': %w", node.Name, err)
}

if err := NodeStart(ctx, runtime, node, k3d.NodeStartOpts{
Wait: nodeCreateOpts.Wait,
Timeout: nodeCreateOpts.Timeout,
NodeHooks: nodeCreateOpts.NodeHooks,
if err := NodeStart(ctx, runtime, node, &k3d.NodeStartOpts{
Wait: nodeCreateOpts.Wait,
Timeout: nodeCreateOpts.Timeout,
NodeHooks: nodeCreateOpts.NodeHooks,
EnvironmentInfo: nodeCreateOpts.EnvironmentInfo,
Intent: k3d.IntentNodeCreate,
}); err != nil {
return err
return fmt.Errorf("failed to start node '%s': %w", node.Name, err)
}

return nil
}

// NodeStart starts an existing node
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts k3d.NodeStartOpts) error {
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {

// return early, if the node is already running
if node.State.Running {
log.Infof("Node %s is already running", node.Name)
l.Log().Infof("Node %s is already running", node.Name)
return nil
}

// FIXME: FixCgroupV2 - to be removed when fixed upstream
if err := enableFixes(ctx, runtime, node, nodeStartOpts); err != nil {
return fmt.Errorf("failed to enable k3d fixes: %w", err)
}

startTime := time.Now()
l.Log().Debugf("Node %s Start Time: %+v", node.Name, startTime)

// execute lifecycle hook actions
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePreStart {
l.Log().Tracef("Node %s: Executing preStartAction '%s'", node.Name, reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
l.Log().Errorf("Node %s: Failed executing preStartAction '%+v': %+v", node.Name, hook, err)
}
}
}

// start the node
l.Log().Tracef("Starting node '%s'", node.Name)

if err := runtime.StartNode(ctx, node); err != nil {
return fmt.Errorf("runtime failed to start node '%s': %w", node.Name, err)
}

if node.State.Started != "" {
ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", node.State.Started)
if err != nil {
l.Log().Debugf("Failed to parse '%s.State.Started' timestamp '%s', falling back to calculated time", node.Name, node.State.Started)
}
startTime = ts.Truncate(time.Second)
l.Log().Debugf("Truncated %s to %s", ts, startTime)
}

if nodeStartOpts.Wait {
if nodeStartOpts.ReadyLogMessage == "" {
nodeStartOpts.ReadyLogMessage = k3d.GetReadyLogMessage(node, nodeStartOpts.Intent)
}
if nodeStartOpts.ReadyLogMessage != "" {
l.Log().Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
if err := NodeWaitForLogMessage(ctx, runtime, node, nodeStartOpts.ReadyLogMessage, startTime); err != nil {
return fmt.Errorf("Node %s failed to get ready: %+v", node.Name, err)
}
} else {
l.Log().Warnf("NodeStart: Set to wait for node %s to be ready, but there's no target log message defined", node.Name)
}
}

// execute lifecycle hook actions
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePostStart {
l.Log().Tracef("Node %s: Executing postStartAction '%s'", node.Name, reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
l.Log().Errorf("Node %s: Failed executing postStartAction '%+v': %+v", node.Name, hook, err)
}
}
}

return nil
}
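`NodeStart` above runs registered hook actions before and after the container start. A minimal sketch of that stage-keyed hook dispatch, under simplified types that stand in for k3d's `LifecycleStage` / `NodeHook`:

```go
package main

import (
	"context"
	"fmt"
)

// Stage and Hook are simplified stand-ins for k3d's LifecycleStage / NodeHook.
type Stage string

const (
	PreStart  Stage = "preStart"
	PostStart Stage = "postStart"
)

type Action interface {
	Run(ctx context.Context) error
}

type printAction struct{ msg string }

func (a printAction) Run(ctx context.Context) error {
	fmt.Println(a.msg)
	return nil
}

type Hook struct {
	Stage  Stage
	Action Action
}

// runHooks executes all hooks registered for one stage; failures are
// reported but don't abort the start sequence, matching the logic above.
func runHooks(ctx context.Context, hooks []Hook, stage Stage) {
	for _, h := range hooks {
		if h.Stage != stage {
			continue
		}
		if err := h.Action.Run(ctx); err != nil {
			fmt.Printf("hook failed at %s: %v\n", stage, err)
		}
	}
}

func main() {
	hooks := []Hook{
		{Stage: PreStart, Action: printAction{"write entrypoint file"}},
		{Stage: PostStart, Action: printAction{"inject host entries"}},
	}
	ctx := context.Background()
	runHooks(ctx, hooks, PreStart)
	fmt.Println("start container...")
	runHooks(ctx, hooks, PostStart)
}
```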
func enableFixes(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {

if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {

// FIXME: FixCgroupV2 - to be removed when fixed upstream
// auto-enable, if needed
EnableCgroupV2FixIfNeeded(runtime)
if fixes.FixCgroupV2Enabled() {

// early exit if we don't need any fix
if !fixes.FixEnabledAny() {
l.Log().Debugln("No fix enabled.")
return nil
}

// ensure nodehook list
if nodeStartOpts.NodeHooks == nil {
nodeStartOpts.NodeHooks = []k3d.NodeHook{}
}

// write umbrella entrypoint
nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: fixes.K3DEntrypoint,
Dest: "/bin/k3d-entrypoint.sh",
Mode: 0744,
},
})

// DNS Fix
if fixes.FixEnabled(fixes.EnvFixDNS) {
l.Log().Debugln(">>> enabling dns magic")

if nodeStartOpts.EnvironmentInfo == nil || nodeStartOpts.EnvironmentInfo.HostGateway == nil {
return fmt.Errorf("Cannot enable DNS fix, as Host Gateway IP is missing!")
}

data := []byte(strings.ReplaceAll(string(fixes.DNSMagicEntrypoint), "GATEWAY_IP", nodeStartOpts.EnvironmentInfo.HostGateway.String()))

nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: data,
Dest: "/bin/k3d-entrypoint-dns.sh",
Mode: 0744,
},
})
}
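The DNS fix above renders an embedded entrypoint template by substituting the discovered host gateway IP before the file is written into the node. A sketch of that substitution step; the template body here is invented for illustration and is not k3d's real `fixes.DNSMagicEntrypoint` script:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// dnsEntrypointTemplate is an illustrative stand-in for fixes.DNSMagicEntrypoint;
// the real script shipped with k3d differs.
const dnsEntrypointTemplate = `#!/bin/sh
echo "nameserver GATEWAY_IP" > /etc/resolv.conf
`

// renderDNSEntrypoint substitutes the host gateway IP into the template,
// like the strings.ReplaceAll call in the hunk above.
func renderDNSEntrypoint(hostGateway net.IP) ([]byte, error) {
	if hostGateway == nil {
		return nil, fmt.Errorf("cannot enable DNS fix, as host gateway IP is missing")
	}
	return []byte(strings.ReplaceAll(dnsEntrypointTemplate, "GATEWAY_IP", hostGateway.String())), nil
}

func main() {
	script, err := renderDNSEntrypoint(net.ParseIP("172.18.0.1"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(string(script))
}
```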
// CGroupsV2Fix
if fixes.FixEnabled(fixes.EnvFixCgroupV2) {
l.Log().Debugf(">>> enabling cgroupsv2 magic")

if nodeStartOpts.NodeHooks == nil {
nodeStartOpts.NodeHooks = []k3d.NodeHook{}
@ -316,57 +489,12 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
Action: actions.WriteFileAction{
Runtime: runtime,
Content: fixes.CgroupV2Entrypoint,
Dest: "/bin/entrypoint.sh",
Dest: "/bin/k3d-entrypoint-cgroupv2.sh",
Mode: 0744,
},
})
}
}

startTime := time.Now()
log.Debugf("Node %s Start Time: %+v", node.Name, startTime)

// execute lifecycle hook actions
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePreStart {
log.Tracef("Node %s: Executing preStartAction '%s'", node.Name, reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
log.Errorf("Node %s: Failed executing preStartAction '%+v': %+v", node.Name, hook, err)
}
}
}

// start the node
log.Tracef("Starting node '%s'", node.Name)

if err := runtime.StartNode(ctx, node); err != nil {
log.Errorf("Failed to start node '%s'", node.Name)
return err
}

if node.State.Started != "" {
ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", node.State.Started)
if err != nil {
log.Debugf("Failed to parse '%s.State.Started' timestamp '%s', falling back to calculated time", node.Name, node.State.Started)
}
startTime = ts.Truncate(time.Second)
log.Debugf("Truncated %s to %s", ts, startTime)
}

if nodeStartOpts.Wait {
if nodeStartOpts.ReadyLogMessage == "" {
nodeStartOpts.ReadyLogMessage = k3d.ReadyLogMessageByRole[node.Role]
}
if nodeStartOpts.ReadyLogMessage != "" {
log.Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
if err := NodeWaitForLogMessage(ctx, runtime, node, nodeStartOpts.ReadyLogMessage, startTime); err != nil {
return fmt.Errorf("Node %s failed to get ready: %+v", node.Name, err)
}
} else {
log.Warnf("NodeStart: Set to wait for node %s to be ready, but there's no target log message defined", node.Name)
}
}

return nil
}

@ -374,7 +502,7 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
// FIXME: FixCgroupV2 - to be removed when fixed upstream
EnableCgroupV2FixIfNeeded(runtime)
log.Tracef("Creating node from spec\n%+v", node)
l.Log().Tracef("Creating node from spec\n%+v", node)

/*
* CONFIGURATION
@ -395,39 +523,39 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
// specify options depending on node role
if node.Role == k3d.AgentRole { // TODO: check here AND in CLI or only here?
if err := patchAgentSpec(node); err != nil {
return err
return fmt.Errorf("failed to patch agent spec on node %s: %w", node.Name, err)
}
} else if node.Role == k3d.ServerRole {
if err := patchServerSpec(node, runtime); err != nil {
return err
return fmt.Errorf("failed to patch server spec on node %s: %w", node.Name, err)
}
}

// memory limits
if node.Memory != "" {
if runtime != runtimes.Docker {
log.Warn("ignoring specified memory limits as runtime is not Docker")
l.Log().Warn("ignoring specified memory limits as runtime is not Docker")
} else {
memory, err := dockerunits.RAMInBytes(node.Memory)
if err != nil {
return fmt.Errorf("Invalid memory limit format: %+v", err)
return fmt.Errorf("invalid memory limit format: %w", err)
}
// mount fake meminfo as readonly
fakemempath, err := util.MakeFakeMeminfo(memory, node.Name)
if err != nil {
return fmt.Errorf("Failed to create fake meminfo: %+v", err)
return fmt.Errorf("failed to create fake meminfo: %w", err)
}
node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s:ro", fakemempath, util.MemInfoPath))
// mount empty edac folder, but only if it exists
exists, err := docker.CheckIfDirectoryExists(ctx, node.Image, util.EdacFolderPath)
if err != nil {
return fmt.Errorf("Failed to check for the existence of edac folder: %+v", err)
return fmt.Errorf("failed to check for the existence of edac folder: %w", err)
}
if exists {
log.Debugln("Found edac folder")
l.Log().Debugln("Found edac folder")
fakeedacpath, err := util.MakeFakeEdac(node.Name)
if err != nil {
return fmt.Errorf("Failed to create fake edac: %+v", err)
return fmt.Errorf("failed to create fake edac: %w", err)
}
node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s:ro", fakeedacpath, util.EdacFolderPath))
}
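The memory-limit path above parses a human-friendly size with `go-units` and then fakes a matching `/proc/meminfo` inside the node. A tiny sketch of both steps; the mount itself is left out, and the meminfo line is a simplified assumption about the file's format:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// RAMInBytes accepts human-friendly sizes like "512m" or "4g" and is
	// what the hunk above uses to validate --memory values.
	memory, err := units.RAMInBytes("4g")
	if err != nil {
		fmt.Println("invalid memory limit format:", err)
		return
	}

	// A /proc/meminfo-style line derived from the limit; mounting a file
	// like this read-only over /proc/meminfo is how the fake limit becomes
	// visible inside the node (meminfo sizes are in kB).
	fmt.Printf("MemTotal: %8d kB\n", memory/1024)
}
```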
@ -438,7 +566,7 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
* CREATION
*/
if err := runtime.CreateNode(ctx, node); err != nil {
return err
return fmt.Errorf("runtime failed to create node '%s': %w", node.Name, err)
}

return nil
@ -448,17 +576,17 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, opts k3d.NodeDeleteOpts) error {
// delete node
if err := runtime.DeleteNode(ctx, node); err != nil {
log.Error(err)
l.Log().Error(err)
}

// delete fake folder created for limits
if node.Memory != "" {
log.Debug("Cleaning fake files folder from k3d config dir for this node...")
l.Log().Debug("Cleaning fake files folder from k3d config dir for this node...")
filepath, err := util.GetNodeFakerDirOrCreate(node.Name)
err = os.RemoveAll(filepath)
if err != nil {
// this err prob should not be fatal, just log it
log.Errorf("Could not remove fake files folder for node %s: %+v", node.Name, err)
l.Log().Errorf("Could not remove fake files folder for node %s: %+v", node.Name, err)
}
}

@ -466,15 +594,14 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o
if !opts.SkipLBUpdate && (node.Role == k3d.ServerRole || node.Role == k3d.AgentRole) {
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.RuntimeLabels[k3d.LabelClusterName]})
if err != nil {
log.Errorf("Failed to find cluster for node '%s'", node.Name)
return err
return fmt.Errorf("failed to find cluster for node '%s': %w", node.Name, err)
}

// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
if !errors.Is(err, ErrLBConfigHostNotFound) {
return fmt.Errorf("Failed to update cluster loadbalancer: %w", err)
return fmt.Errorf("failed to update cluster loadbalancer: %w", err)
}
}
}
@ -510,7 +637,7 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error {
dockerHost := runtime.GetHost()
if dockerHost != "" {
dockerHost = strings.Split(dockerHost, ":")[0] // remove the port
log.Tracef("Using docker host %s", dockerHost)
l.Log().Tracef("Using docker host %s", dockerHost)
node.RuntimeLabels[k3d.LabelServerAPIHostIP] = dockerHost
node.RuntimeLabels[k3d.LabelServerAPIHost] = dockerHost
}
@ -525,8 +652,7 @@ func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error {
func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels)
if err != nil {
log.Errorln("Failed to get nodes")
return nil, err
return nil, fmt.Errorf("failed to list nodes: %w", err)
}

return nodes, nil
@ -537,8 +663,7 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
// get node
node, err := runtime.GetNode(ctx, node)
if err != nil {
log.Errorf("Failed to get node '%s'", node.Name)
return nil, err
return nil, fmt.Errorf("failed to get node '%s': %w", node.Name, err)
}

return node, nil
@ -546,62 +671,101 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3

// NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
log.Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
for {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
d, ok := ctx.Deadline()
if ok {
log.Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
l.Log().Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)

// specify max number of retries if container is in crashloop (as defined by last seen message being a fatal log)
backOffLimit := k3d.DefaultNodeWaitForLogMessageCrashLoopBackOffLimit
if l, ok := os.LookupEnv(k3d.K3dEnvDebugNodeWaitBackOffLimit); ok {
limit, err := strconv.Atoi(l)
if err == nil {
backOffLimit = limit
}
}

// start a goroutine to print a warning continuously if a node is restarting for quite some time already
donechan := make(chan struct{})
defer close(donechan)
go func(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, since time.Time, donechan chan struct{}) {
for {
select {
case <-ctx.Done():
return
case <-donechan:
return
default:
}
return ctx.Err()
default:
// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
l.Log().Warnf("Node '%s' is restarting for more than %s now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name, k3d.NodeWaitForLogMessageRestartWarnTime)
}
time.Sleep(500 * time.Millisecond)
}

// read the logs
out, err := runtime.GetNodeLogs(ctx, node, since)
}(ctx, runtime, node, since, donechan)

// Start loop to check log stream for specified log message.
// We're looping here, as sometimes the containers run into a crash loop, but *may* recover from that
// e.g. when a new server is joining an existing cluster and has to wait for another member to finish learning.
// The logstream returned by docker ends every time the container restarts, so we have to start from the beginning.
for i := 0; i < backOffLimit; i++ {

// get the log stream (reader is following the logstream)
out, err := runtime.GetNodeLogs(ctx, node, since, &runtimeTypes.NodeLogsOpts{Follow: true})
if out != nil {
defer out.Close()
}
if err != nil {
if out != nil {
out.Close()
}
return fmt.Errorf("Failed waiting for log message '%s' from node '%s': %w", message, node.Name, err)
}
defer out.Close()

buf := new(bytes.Buffer)
nRead, _ := buf.ReadFrom(out)
out.Close()
output := buf.String()
// We're scanning the logstream continuously line-by-line
scanner := bufio.NewScanner(out)
var previousline string

if nRead > 0 && strings.Contains(os.Getenv("K3D_LOG_NODE_WAIT_LOGS"), string(node.Role)) {
log.Tracef("=== Read logs since %s ===\n%s\n", since, output)
}
// check if we can find the specified line in the log
if nRead > 0 && strings.Contains(output, message) {
if log.GetLevel() >= log.TraceLevel {
temp := strings.Split(output, "\n")
for _, l := range temp {
if strings.Contains(l, message) {
log.Tracef("Found target log line: `%s`", l)
for scanner.Scan() {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
d, ok := ctx.Deadline()
if ok {
l.Log().Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
}
return ctx.Err()
default:
}

if strings.Contains(os.Getenv(k3d.K3dEnvLogNodeWaitLogs), string(node.Role)) {
l.Log().Tracef(">>> Parsing log line: `%s`", scanner.Text())
}
// check if we can find the specified line in the log
if strings.Contains(scanner.Text(), message) {
l.Log().Tracef("Found target message `%s` in log line `%s`", message, scanner.Text())
l.Log().Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
}

previousline = scanner.Text()

}

out.Close() // no more input on scanner, but target log not yet found -> close current logreader (precautionary)

// we got here, because the logstream ended (no more input on scanner), so we check if maybe the container crashed
if strings.Contains(previousline, "level=fatal") {
// case 1: last log line we saw contained a fatal error, so probably it crashed and we want to retry on restart
l.Log().Warnf("warning: encountered fatal log from node %s (retrying %d/%d): %s", node.Name, i, backOffLimit, previousline)
out.Close()
time.Sleep(500 * time.Millisecond)
continue
} else {
// case 2: last log line we saw did not contain a fatal error, so we break the loop here and return a generic error
break
}

// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
log.Warnf("Node '%s' is restarting for more than a minute now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name)
}

time.Sleep(500 * time.Millisecond) // wait for half a second to avoid overloading docker (error `socket: too many open files`)
}
log.Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
return fmt.Errorf("error waiting for log line `%s` from node '%s': stopped returning log lines", message, node.Name)
}
|
||||
|
// NodeFilterByRoles filters a list of nodes by their roles
@@ -610,7 +774,7 @@ func NodeFilterByRoles(nodes []*k3d.Node, includeRoles, excludeRoles []k3d.Role)
for _, includeRole := range includeRoles {
for _, excludeRole := range excludeRoles {
if includeRole == excludeRole {
log.Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
l.Log().Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
}
}
}
@@ -634,7 +798,7 @@ nodeLoop:
}
}

log.Tracef("Filteres %d nodes by roles (in: %+v | ex: %+v), got %d left", len(nodes), includeRoles, excludeRoles, len(resultList))
l.Log().Tracef("Filteres %d nodes by roles (in: %+v | ex: %+v), got %d left", len(nodes), includeRoles, excludeRoles, len(resultList))

return resultList
}
@@ -648,7 +812,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang

result, err := CopyNode(ctx, existingNode, CopyNodeOpts{keepState: false})
if err != nil {
return err
return fmt.Errorf("failed to copy node %s: %w", existingNode.Name, err)
}

/*
@@ -666,11 +830,11 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
// loop over existing portbindings to avoid port collisions (docker doesn't check for it)
for _, existingPB := range result.Ports[port] {
if util.IsPortBindingEqual(portbinding, existingPB) { // also matches on "equal" HostIPs (127.0.0.1, "", 0.0.0.0)
log.Tracef("Skipping existing PortBinding: %+v", existingPB)
l.Log().Tracef("Skipping existing PortBinding: %+v", existingPB)
continue loopChangesetPortbindings
}
}
log.Tracef("Adding portbinding %+v for port %s", portbinding, port.Port())
l.Log().Tracef("Adding portbinding %+v for port %s", portbinding, port.Port())
result.Ports[port] = append(result.Ports[port], portbinding)
}
}
@@ -693,7 +857,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
// prepare to write config to lb container
configyaml, err := yaml.Marshal(lbConfig)
if err != nil {
return err
return fmt.Errorf("failed to marshal loadbalancer config: %w", err)
}

writeLbConfigAction := k3d.NodeHook{
@@ -718,14 +882,14 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
// rename existing node
oldNameTemp := fmt.Sprintf("%s-%s", old.Name, util.GenerateRandomString(5))
oldNameOriginal := old.Name
log.Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp)
l.Log().Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp)
if err := runtime.RenameNode(ctx, old, oldNameTemp); err != nil {
return err
return fmt.Errorf("runtime failed to rename node '%s': %w", old.Name, err)
}
old.Name = oldNameTemp

// create (not start) new node
log.Infof("Creating new node %s...", new.Name)
l.Log().Infof("Creating new node %s...", new.Name)
if err := NodeCreate(ctx, runtime, new, k3d.NodeCreateOpts{Wait: true}); err != nil {
if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil {
return fmt.Errorf("Failed to create new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
@@ -734,14 +898,14 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
}

// stop existing/old node
log.Infof("Stopping existing node %s...", old.Name)
l.Log().Infof("Stopping existing node %s...", old.Name)
if err := runtime.StopNode(ctx, old); err != nil {
return err
return fmt.Errorf("runtime failed to stop node '%s': %w", old.Name, err)
}

// start new node
log.Infof("Starting new node %s...", new.Name)
if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
l.Log().Infof("Starting new node %s...", new.Name)
if err := NodeStart(ctx, runtime, new, &k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err)
}
@@ -749,16 +913,16 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
return fmt.Errorf("Failed to start new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
}
old.Name = oldNameOriginal
if err := NodeStart(ctx, runtime, old, k3d.NodeStartOpts{Wait: true}); err != nil {
if err := NodeStart(ctx, runtime, old, &k3d.NodeStartOpts{Wait: true}); err != nil {
return fmt.Errorf("Failed to start new node. Also failed to restart old node: %+v", err)
}
return fmt.Errorf("Failed to start new node. Rolled back: %+v", err)
}

// cleanup: delete old node
log.Infof("Deleting old node %s...", old.Name)
l.Log().Infof("Deleting old node %s...", old.Name)
if err := NodeDelete(ctx, runtime, old, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
return err
return fmt.Errorf("failed to delete old node '%s': %w", old.Name, err)
}

// done
@@ -773,7 +937,7 @@ func CopyNode(ctx context.Context, src *k3d.Node, opts CopyNodeOpts) (*k3d.Node,

targetCopy, err := copystruct.Copy(src)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to copy node struct: %w", err)
}

result := targetCopy.(*k3d.Node)
@@ -783,5 +947,5 @@ func CopyNode(ctx context.Context, src *k3d.Node, opts CopyNodeOpts) (*k3d.Node,
result.State = k3d.NodeState{}
}

return result, err
return result, nil
}
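The reworked NodeWaitForLogMessage above is the heart of this file's changes: instead of re-reading the whole log buffer on every poll, it now follows the stream line-by-line and retries up to a backoff limit when the stream ends on a fatal line (i.e. the container is probably crash-looping). A minimal standard-library sketch of that retry pattern; `openLogStream`, `target` and `backOffLimit` are illustrative stand-ins, not the k3d API:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
	"time"
)

// waitForLine scans a re-openable log stream for target. If the stream ends
// on a fatal-looking line, it retries up to backOffLimit times, mirroring
// the crash-loop handling in NodeWaitForLogMessage above.
func waitForLine(openLogStream func() (io.ReadCloser, error), target string, backOffLimit int) error {
	for i := 0; i < backOffLimit; i++ {
		out, err := openLogStream()
		if err != nil {
			return fmt.Errorf("failed to open log stream: %w", err)
		}
		scanner := bufio.NewScanner(out)
		var previousLine string
		for scanner.Scan() {
			if strings.Contains(scanner.Text(), target) {
				out.Close()
				return nil // found the target line
			}
			previousLine = scanner.Text()
		}
		out.Close()
		// stream ended without a match: retry only if it looks like a crash
		if !strings.Contains(previousLine, "level=fatal") {
			break
		}
		time.Sleep(500 * time.Millisecond) // avoid hammering the runtime
	}
	return fmt.Errorf("log line `%s` never appeared", target)
}

func main() {
	logs := "starting...\nk3s is up and running\n"
	open := func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader(logs)), nil }
	fmt.Println(waitForLine(open, "k3s is up and running", 3))
}
```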
@@ -28,12 +28,13 @@ import (
"strings"

"github.com/docker/go-connections/nat"
"github.com/rancher/k3d/v4/pkg/config/types"
config "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"github.com/rancher/k3d/v5/pkg/config/types"
config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/util"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)

@@ -46,15 +47,15 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
nodeList := cluster.Nodes

for _, portWithNodeFilters := range portsWithNodeFilters {
log.Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
l.Log().Tracef("inspecting port mapping for %s with nodefilters %s", portWithNodeFilters.Port, portWithNodeFilters.NodeFilters)
if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
log.Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
l.Log().Infof("portmapping '%s' lacks a nodefilter, but there's more than one node: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
}

for _, f := range portWithNodeFilters.NodeFilters {
if strings.HasPrefix(f, "loadbalancer") {
log.Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
l.Log().Infof("portmapping '%s' targets the loadbalancer: defaulting to %s", portWithNodeFilters.Port, types.DefaultTargetsNodefiltersPortMappings)
portWithNodeFilters.NodeFilters = types.DefaultTargetsNodefiltersPortMappings
break
}
@@ -80,7 +81,7 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
}
for _, pm := range portmappings {
if err := loadbalancerAddPortConfigs(cluster.ServerLoadBalancer, pm, nodes); err != nil {
return err
return fmt.Errorf("error adding port config to loadbalancer: %w", err)
}
}
} else if suffix == "direct" {
@@ -99,13 +100,14 @@ func TransformPorts(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.

}

// print generated loadbalancer config
if log.GetLevel() >= log.DebugLevel {
// print generated loadbalancer config if exists
// (avoid segmentation fault if loadbalancer is disabled)
if l.Log().GetLevel() >= logrus.DebugLevel && cluster.ServerLoadBalancer != nil {
yamlized, err := yaml.Marshal(cluster.ServerLoadBalancer.Config)
if err != nil {
log.Errorf("error printing loadbalancer config: %v", err)
l.Log().Errorf("error printing loadbalancer config: %v", err)
} else {
log.Debugf("generated loadbalancer config:\n%s", string(yamlized))
l.Log().Debugf("generated loadbalancer config:\n%s", string(yamlized))
}
}
return nil
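One behavioral fix above deserves a note: the debug dump of the generated loadbalancer config is now gated on both the log level and `cluster.ServerLoadBalancer` being non-nil, since clusters created with the loadbalancer disabled used to hit a nil dereference here. A reduced sketch of the same guard; `LBConfig` is an illustrative stand-in for k3d's real config type:

```go
package main

import (
	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)

type LBConfig struct {
	Ports map[string][]string `yaml:"ports"`
}

func dumpConfig(lb *LBConfig) {
	// both conditions matter: the level check skips needless marshalling,
	// and the nil check prevents the nil dereference this hunk fixes
	if logrus.GetLevel() >= logrus.DebugLevel && lb != nil {
		yamlized, err := yaml.Marshal(lb.Ports) // field access would panic on a nil lb
		if err != nil {
			logrus.Errorf("error printing loadbalancer config: %v", err)
			return
		}
		logrus.Debugf("generated loadbalancer config:\n%s", string(yamlized))
	}
}

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	dumpConfig(&LBConfig{Ports: map[string][]string{"80.tcp": {"k3d-server-0"}}})
	dumpConfig(nil) // safe: the guard short-circuits
}
```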
@@ -28,12 +28,12 @@ import (

"github.com/docker/go-connections/nat"
"github.com/imdario/mergo"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/k3s"
"github.com/rancher/k3d/v4/pkg/types/k8s"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/pkg/types/k8s"
"gopkg.in/yaml.v2"
)

@@ -43,11 +43,11 @@ func RegistryRun(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Registr
return nil, fmt.Errorf("Failed to create registry: %+v", err)
}

if err := NodeStart(ctx, runtime, regNode, k3d.NodeStartOpts{}); err != nil {
if err := NodeStart(ctx, runtime, regNode, &k3d.NodeStartOpts{}); err != nil {
return nil, fmt.Errorf("Failed to start registry: %+v", err)
}

return regNode, err
return regNode, nil
}

// RegistryCreate creates a registry node
@@ -58,8 +58,8 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi
reg.Host = k3d.DefaultRegistryName
}
// if err := ValidateHostname(reg.Host); err != nil {
// log.Errorln("Invalid name for registry")
// log.Fatalln(err)
// l.Log().Errorln("Invalid name for registry")
// l.Log().Fatalln(err)
// }

registryNode := &k3d.Node{
@@ -97,13 +97,12 @@ func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Regi
registryNode.Ports[reg.ExposureOpts.Port] = []nat.PortBinding{reg.ExposureOpts.Binding}

// create the registry node
log.Infof("Creating node '%s'", registryNode.Name)
l.Log().Infof("Creating node '%s'", registryNode.Name)
if err := NodeCreate(ctx, runtime, registryNode, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create registry node")
return nil, err
return nil, fmt.Errorf("failed to create registry node '%s': %w", registryNode.Name, err)
}

log.Infof("Successfully created registry '%s'", registryNode.Name)
l.Log().Infof("Successfully created registry '%s'", registryNode.Name)

return registryNode, nil

@@ -115,8 +114,7 @@ func RegistryConnectClusters(ctx context.Context, runtime runtimes.Runtime, regi
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
return fmt.Errorf("Failed to find registry node '%s': %w", registryNode.Name, err)
}

// get cluster details and connect
@@ -124,13 +122,13 @@ func RegistryConnectClusters(ctx context.Context, runtime runtimes.Runtime, regi
for _, c := range clusters {
cluster, err := ClusterGet(ctx, runtime, c)
if err != nil {
log.Warnf("Failed to connect to cluster '%s': Cluster not found", c.Name)
l.Log().Warnf("Failed to connect to cluster '%s': Cluster not found", c.Name)
failed++
continue
}
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, cluster.Network.Name); err != nil {
log.Warnf("Failed to connect to cluster '%s': Connection failed", cluster.Name)
log.Warnln(err)
l.Log().Warnf("Failed to connect to cluster '%s': Connection failed", cluster.Name)
l.Log().Warnln(err)
failed++
}
}
@@ -148,16 +146,15 @@ func RegistryConnectNetworks(ctx context.Context, runtime runtimes.Runtime, regi
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
return fmt.Errorf("Failed to find registry node '%s': %w", registryNode.Name, err)
}

// get cluster details and connect
failed := 0
for _, net := range networks {
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, net); err != nil {
log.Warnf("Failed to connect to network '%s': Connection failed", net)
log.Warnln(err)
l.Log().Warnf("Failed to connect to network '%s': Connection failed", net)
l.Log().Warnln(err)
failed++
}
}
@@ -247,7 +244,7 @@ func RegistryFromNode(node *k3d.Node) (*k3d.Registry, error) {
}
}

log.Tracef("Got registry %+v from node %+v", registry, node)
l.Log().Tracef("Got registry %+v from node %+v", registry, node)

return registry, nil

@@ -273,11 +270,11 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
}

if len(registries) > 1 {
log.Warnf("More than one registry specified, but the LocalRegistryHostingV1 spec only supports one -> Selecting the first one: %s", registries[0].Host)
l.Log().Warnf("More than one registry specified, but the LocalRegistryHostingV1 spec only supports one -> Selecting the first one: %s", registries[0].Host)
}

if len(registries) < 1 {
log.Debugln("No registry specified, not generating local registry hosting configmap")
l.Log().Debugln("No registry specified, not generating local registry hosting configmap")
return nil, nil
}

@@ -290,15 +287,15 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
// if the host is now 0.0.0.0, check if we can set it to the IP of the docker-machine, if it's used
if host == k3d.DefaultAPIHost && runtime == runtimes.Docker {
if gort.GOOS == "windows" || gort.GOOS == "darwin" {
log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
l.Log().Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
if err != nil {
log.Warnf("Using docker-machine, but failed to get it's IP for usage in LocalRegistryHosting Config Map: %+v", err)
l.Log().Warnf("Using docker-machine, but failed to get it's IP for usage in LocalRegistryHosting Config Map: %+v", err)
} else if machineIP != "" {
log.Infof("Using the docker-machine IP %s in the LocalRegistryHosting Config Map", machineIP)
l.Log().Infof("Using the docker-machine IP %s in the LocalRegistryHosting Config Map", machineIP)
host = machineIP
} else {
log.Traceln("Not using docker-machine")
l.Log().Traceln("Not using docker-machine")
}
}
}
@@ -313,11 +310,12 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
k8s.LocalRegistryHostingV1{
Host: fmt.Sprintf("%s:%s", host, registries[0].ExposureOpts.Binding.HostPort),
HostFromContainerRuntime: fmt.Sprintf("%s:%s", registries[0].Host, registries[0].ExposureOpts.Port.Port()),
HostFromClusterNetwork: fmt.Sprintf("%s:%s", registries[0].Host, registries[0].ExposureOpts.Port.Port()),
Help: "https://k3d.io/usage/guides/registries/#using-a-local-registry",
},
)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to marshal LocalRegistryHosting configmap data: %w", err)
}

cm := configmap{
@@ -334,10 +332,10 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt

cmYaml, err := yaml.Marshal(cm)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to marshal LocalRegistryHosting configmap: %w", err)
}

log.Tracef("LocalRegistryHostingConfigMapYaml: %s", string(cmYaml))
l.Log().Tracef("LocalRegistryHostingConfigMapYaml: %s", string(cmYaml))

return cmYaml, nil
}
@@ -345,7 +343,7 @@ func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, runt
// RegistryMergeConfig merges a source registry config into an existing dest registry config
func RegistryMergeConfig(ctx context.Context, dest, src *k3s.Registry) error {
if err := mergo.MergeWithOverwrite(dest, src); err != nil {
return fmt.Errorf("Failed to merge registry configs: %+v", err)
return fmt.Errorf("failed to merge registry configs: %w", err)
}
return nil
}
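A pattern worth calling out across the registry changes above: bare `return err` and `%+v`-style wrapping are consistently replaced by `fmt.Errorf("...: %w", err)`. A small sketch of why `%w` is the better default (all names here are illustrative, not k3d code): callers gain context in the message without losing the ability to match the underlying error with `errors.Is`/`errors.As`.

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("node not found")

func findRegistryNode(name string) error {
	return errNotFound // pretend the runtime lookup failed
}

func connect(name string) error {
	if err := findRegistryNode(name); err != nil {
		// %w keeps the error chain intact; %+v (the old style) flattens it to text
		return fmt.Errorf("Failed to find registry node '%s': %w", name, err)
	}
	return nil
}

func main() {
	err := connect("k3d-registry")
	fmt.Println(err)                         // wrapped message with context
	fmt.Println(errors.Is(err, errNotFound)) // true: the chain survives the wrap
}
```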
@@ -27,8 +27,8 @@ import (
"testing"

"github.com/docker/go-connections/nat"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
)

func TestRegistryGenerateLocalRegistryHostingConfigMapYAML(t *testing.T) {
@@ -42,6 +42,7 @@ metadata:
data:
localRegistryHosting.v1: |
host: test-host:5432
hostFromClusterNetwork: test-host:1234
hostFromContainerRuntime: test-host:1234
help: https://k3d.io/usage/guides/registries/#using-a-local-registry
`
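For reference, the expected YAML in the test above is plain gopkg.in/yaml.v2 output. A sketch reproducing the inner LocalRegistryHostingV1 document; the struct here is an illustrative stand-in, with only the field names and values mirroring the spec shown in the test:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type hosting struct {
	Host                     string `yaml:"host"`
	HostFromClusterNetwork   string `yaml:"hostFromClusterNetwork"`
	HostFromContainerRuntime string `yaml:"hostFromContainerRuntime"`
	Help                     string `yaml:"help"`
}

func main() {
	data, err := yaml.Marshal(hosting{
		Host:                     "test-host:5432",
		HostFromClusterNetwork:   "test-host:1234",
		HostFromContainerRuntime: "test-host:1234",
		Help:                     "https://k3d.io/usage/guides/registries/#using-a-local-registry",
	})
	if err != nil {
		panic(err)
	}
	// this document gets nested under the localRegistryHosting.v1 key of the
	// ConfigMap, matching the expected test output above
	fmt.Println(string(data))
}
```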
@@ -20,7 +20,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

package tools
package client

import (
"context"
@@ -31,11 +31,9 @@ import (
"sync"
"time"

k3dc "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
)

// ImageImportIntoClusterMulti starts up a k3d tools container for the selected cluster and uses it to export
@@ -43,7 +41,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, opts k3d.ImageImportOpts) error {
imagesFromRuntime, imagesFromTar, err := findImages(ctx, runtime, images)
if err != nil {
return err
return fmt.Errorf("failed to find images: %w", err)
}

// no images found to load -> exit early
@@ -51,53 +49,10 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
return fmt.Errorf("No valid images specified")
}

cluster, err = k3dc.ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find the specified cluster")
return err
}

if cluster.Network.Name == "" {
return fmt.Errorf("Failed to get network for cluster '%s'", cluster.Name)
}

var imageVolume string
var ok bool
for _, node := range cluster.Nodes {
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
if imageVolume, ok = node.RuntimeLabels[k3d.LabelImageVolume]; ok {
break
}
}
}
if imageVolume == "" {
return fmt.Errorf("Failed to find image volume for cluster '%s'", cluster.Name)
}

log.Debugf("Attaching to cluster's image volume '%s'", imageVolume)

// create tools node to export images
var toolsNode *k3d.Node
toolsNode, err = runtime.GetNode(ctx, &k3d.Node{Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name)})
if err != nil || toolsNode == nil {
log.Infoln("Starting new tools node...")
toolsNode, err = runToolsNode( // TODO: re-use existing container
ctx,
runtime,
cluster,
cluster.Network.Name,
[]string{
fmt.Sprintf("%s:%s", imageVolume, k3d.DefaultImageVolumeMountPath),
fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()),
})
if err != nil {
log.Errorf("Failed to run tools container for cluster '%s'", cluster.Name)
}
} else if !toolsNode.State.Running {
log.Infof("Starting existing tools node %s...", toolsNode.Name)
if err := runtime.StartNode(ctx, toolsNode); err != nil {
return fmt.Errorf("error starting existing tools node %s: %v", toolsNode.Name, err)
}
toolsNode, err := EnsureToolsNode(ctx, runtime, cluster)
if err != nil {
return fmt.Errorf("failed to ensure that tools node is running: %w", err)
}

/* TODO:
@@ -112,22 +67,21 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,

if len(imagesFromRuntime) > 0 {
// save image to tarfile in shared volume
log.Infof("Saving %d image(s) from runtime...", len(imagesFromRuntime))
l.Log().Infof("Saving %d image(s) from runtime...", len(imagesFromRuntime))
tarName := fmt.Sprintf("%s/k3d-%s-images-%s.tar", k3d.DefaultImageVolumeMountPath, cluster.Name, time.Now().Format("20060102150405"))
if err := runtime.ExecInNode(ctx, toolsNode, append([]string{"./k3d-tools", "save-image", "-d", tarName}, imagesFromRuntime...)); err != nil {
log.Errorf("Failed to save image(s) in tools container for cluster '%s'", cluster.Name)
return err
return fmt.Errorf("failed to save image(s) in tools container for cluster '%s': %w", cluster.Name, err)
}
importTarNames = append(importTarNames, tarName)
}

if len(imagesFromTar) > 0 {
// copy tarfiles to shared volume
log.Infof("Saving %d tarball(s) to shared image volume...", len(imagesFromTar))
l.Log().Infof("Saving %d tarball(s) to shared image volume...", len(imagesFromTar))
for _, file := range imagesFromTar {
tarName := fmt.Sprintf("%s/k3d-%s-images-%s-file-%s", k3d.DefaultImageVolumeMountPath, cluster.Name, time.Now().Format("20060102150405"), path.Base(file))
if err := runtime.CopyToNode(ctx, file, tarName, toolsNode); err != nil {
log.Errorf("Failed to copy image tar '%s' to tools node! Error below:\n%+v", file, err)
l.Log().Errorf("failed to copy image tar '%s' to tools node! Error below:\n%+v", file, err)
continue
}
importTarNames = append(importTarNames, tarName)
@@ -135,7 +89,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
}

// import image in each node
log.Infoln("Importing images into nodes...")
l.Log().Infoln("Importing images into nodes...")
var importWaitgroup sync.WaitGroup
for _, tarName := range importTarNames {
for _, node := range cluster.Nodes {
@@ -143,10 +97,9 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
importWaitgroup.Add(1)
go func(node *k3d.Node, wg *sync.WaitGroup, tarPath string) {
log.Infof("Importing images from tarball '%s' into node '%s'...", tarPath, node.Name)
l.Log().Infof("Importing images from tarball '%s' into node '%s'...", tarPath, node.Name)
if err := runtime.ExecInNode(ctx, node, []string{"ctr", "image", "import", tarPath}); err != nil {
log.Errorf("Failed to import images in node '%s'", node.Name)
log.Errorln(err)
l.Log().Errorf("failed to import images in node '%s': %v", node.Name, err)
}
wg.Done()
}(node, &importWaitgroup, tarName)
@@ -157,51 +110,53 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,

// remove tarball
if !opts.KeepTar && len(importTarNames) > 0 {
log.Infoln("Removing the tarball(s) from image volume...")
l.Log().Infoln("Removing the tarball(s) from image volume...")
if err := runtime.ExecInNode(ctx, toolsNode, []string{"rm", "-f", strings.Join(importTarNames, " ")}); err != nil {
log.Errorf("Failed to delete one or more tarballs from '%+v'", importTarNames)
log.Errorln(err)
l.Log().Errorf("failed to delete one or more tarballs from '%+v': %v", importTarNames, err)
}
}

// delete tools container
if !opts.KeepToolsNode {
log.Infoln("Removing k3d-tools node...")
l.Log().Infoln("Removing k3d-tools node...")
if err := runtime.DeleteNode(ctx, toolsNode); err != nil {
log.Errorf("Failed to delete tools node '%s': Try to delete it manually", toolsNode.Name)
l.Log().Errorf("failed to delete tools node '%s' (try to delete it manually): %v", toolsNode.Name, err)
}
}

log.Infoln("Successfully imported image(s)")
l.Log().Infoln("Successfully imported image(s)")

return nil

}

func findImages(ctx context.Context, runtime runtimes.Runtime, requestedImages []string) (imagesFromRuntime, imagesFromTar []string, err error) {
type runtimeImageGetter interface {
GetImages(context.Context) ([]string, error)
}

func findImages(ctx context.Context, runtime runtimeImageGetter, requestedImages []string) (imagesFromRuntime, imagesFromTar []string, err error) {
runtimeImages, err := runtime.GetImages(ctx)
if err != nil {
log.Errorln("Failed to fetch list of existing images from runtime")
return nil, nil, err
return nil, nil, fmt.Errorf("failed to fetch list of existing images from runtime: %w", err)
}

for _, requestedImage := range requestedImages {
if isFile(requestedImage) {
imagesFromTar = append(imagesFromTar, requestedImage)
log.Debugf("Selected image '%s' is a file", requestedImage)
break
l.Log().Debugf("Selected image '%s' is a file", requestedImage)
continue
}

runtimeImage, found := findRuntimeImage(requestedImage, runtimeImages)
if found {
imagesFromRuntime = append(imagesFromRuntime, runtimeImage)
log.Debugf("Selected image '%s' (found as '%s') in runtime", requestedImage, runtimeImage)
break
l.Log().Debugf("Selected image '%s' (found as '%s') in runtime", requestedImage, runtimeImage)
continue
}

log.Warnf("Image '%s' is not a file and couldn't be found in the container runtime", requestedImage)
l.Log().Warnf("Image '%s' is not a file and couldn't be found in the container runtime", requestedImage)
}
return imagesFromRuntime, imagesFromTar, err
return imagesFromRuntime, imagesFromTar, nil
}

func findRuntimeImage(requestedImage string, runtimeImages []string) (string, bool) {
@@ -284,19 +239,78 @@ func runToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cl
}
node := &k3d.Node{
Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: fmt.Sprintf("%s:%s", k3d.DefaultToolsImageRepo, version.GetHelperImageVersion()),
Image: k3d.GetToolsImage(),
Role: k3d.NoRole,
Volumes: volumes,
Networks: []string{network},
Cmd: []string{},
Args: []string{"noop"},
RuntimeLabels: k3d.DefaultRuntimeLabels,
RuntimeLabels: labels,
}
node.RuntimeLabels[k3d.LabelClusterName] = cluster.Name
if err := k3dc.NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Errorf("Failed to create tools container for cluster '%s'", cluster.Name)
return node, err
if err := NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
return node, fmt.Errorf("failed to run k3d-tools node for cluster '%s': %w", cluster.Name, err)
}

return node, nil
}

func EnsureToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*k3d.Node, error) {

var toolsNode *k3d.Node
toolsNode, err := runtime.GetNode(ctx, &k3d.Node{Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name)})
if err != nil || toolsNode == nil {

// Get more info on the cluster, if required
var imageVolume string
if cluster.Network.Name == "" || cluster.ImageVolume == "" {
l.Log().Debugf("Gathering some more info about the cluster before creating the tools node...")
var err error
cluster, err = ClusterGet(ctx, runtime, cluster)
if err != nil {
return nil, fmt.Errorf("failed to retrieve cluster: %w", err)
}

if cluster.Network.Name == "" {
return nil, fmt.Errorf("failed to get network for cluster '%s'", cluster.Name)
}

var ok bool
for _, node := range cluster.Nodes {
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
if imageVolume, ok = node.RuntimeLabels[k3d.LabelImageVolume]; ok {
break
}
}
}
if imageVolume == "" {
return nil, fmt.Errorf("Failed to find image volume for cluster '%s'", cluster.Name)
}
l.Log().Debugf("Attaching to cluster's image volume '%s'", imageVolume)
cluster.ImageVolume = imageVolume
}

// start tools node
l.Log().Infoln("Starting new tools node...")
toolsNode, err = runToolsNode(
ctx,
runtime,
cluster,
cluster.Network.Name,
[]string{
fmt.Sprintf("%s:%s", cluster.ImageVolume, k3d.DefaultImageVolumeMountPath),
fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()),
})
if err != nil {
l.Log().Errorf("Failed to run tools container for cluster '%s'", cluster.Name)
}
} else if !toolsNode.State.Running {
l.Log().Infof("Starting existing tools node %s...", toolsNode.Name)
if err := runtime.StartNode(ctx, toolsNode); err != nil {
return nil, fmt.Errorf("error starting existing tools node %s: %v", toolsNode.Name, err)
}
}

return toolsNode, err

}
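The import loop in ImageImportIntoClusterMulti above fans out one goroutine per node-and-tarball pair and joins them with a sync.WaitGroup. Reduced to a runnable sketch; the node names and the `execInNode` stand-in are illustrative, not the k3d API:

```go
package main

import (
	"fmt"
	"sync"
)

// execInNode stands in for the runtime call that runs `ctr image import`
// inside a node container.
func execInNode(node, tarPath string) error {
	fmt.Printf("importing %s into %s\n", tarPath, node)
	return nil
}

func main() {
	nodes := []string{"k3d-demo-server-0", "k3d-demo-agent-0"}
	tarballs := []string{"/k3d/images/a.tar", "/k3d/images/b.tar"}

	var wg sync.WaitGroup
	for _, tar := range tarballs {
		for _, node := range nodes {
			wg.Add(1)
			// pass loop variables as arguments so each goroutine gets its own copy
			go func(node, tar string) {
				defer wg.Done()
				if err := execInNode(node, tar); err != nil {
					fmt.Printf("failed to import images in node '%s': %v\n", node, err)
				}
			}(node, tar)
		}
	}
	wg.Wait() // block until every node has imported every tarball
}
```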
@@ -20,10 +20,14 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

package tools
package client

import (
"context"
"os"
"testing"

"github.com/go-test/deep"
)

func Test_findRuntimeImage(T *testing.T) {
@@ -170,3 +174,44 @@ func Test_findRuntimeImage(T *testing.T) {
})
}
}

func Test_findImages(t *testing.T) {
// given
tarImage, err := os.CreateTemp("", "images.tgz")
if err != nil {
t.Fatal("Failed to create temporary file")
}
defer os.Remove(tarImage.Name())

tarImages := []string{tarImage.Name()}
runtimeImages := []string{
"alpine:version",
"busybox:latest",
}
runtime := &FakeRuntimeImageGetter{runtimeImages: runtimeImages}

requestedImages := append(runtimeImages, tarImages...)

// when
foundRuntimeImages, foundTarImages, err := findImages(context.Background(), runtime, requestedImages)

// then
if err != nil {
t.Errorf("Got unexpected error %v", err)
}

if diff := deep.Equal(foundRuntimeImages, runtimeImages); diff != nil {
t.Errorf("Found runtime images\n%+v\ndoes not match expected runtime images\n%+v\nDiff:\n%+v", foundRuntimeImages, runtimeImages, diff)
}
if diff := deep.Equal(foundTarImages, tarImages); diff != nil {
t.Errorf("Found tar images\n%+v\ndoes not match expected tar images\n%+v\nDiff:\n%+v", foundTarImages, runtimeImages, diff)
}
}

type FakeRuntimeImageGetter struct {
runtimeImages []string
}

func (f *FakeRuntimeImageGetter) GetImages(_ context.Context) ([]string, error) {
return f.runtimeImages, nil
}
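Worth noting on the test above: the refactor narrows findImages from taking the full runtimes.Runtime to the one-method runtimeImageGetter interface it actually uses, which is what makes the three-line FakeRuntimeImageGetter stub possible here. Defining small consumer-side interfaces like this is the idiomatic Go seam for testing code without a real container runtime behind it.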
@@ -25,15 +25,15 @@ import (
"fmt"
"strings"

log "github.com/sirupsen/logrus"
l "github.com/rancher/k3d/v5/pkg/logger"

"github.com/spf13/viper"

"github.com/rancher/k3d/v4/pkg/config/v1alpha2"
"github.com/rancher/k3d/v4/pkg/config/v1alpha3"
defaultConfig "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
"github.com/rancher/k3d/v5/pkg/config/v1alpha2"
"github.com/rancher/k3d/v5/pkg/config/v1alpha3"
defaultConfig "github.com/rancher/k3d/v5/pkg/config/v1alpha3"

types "github.com/rancher/k3d/v4/pkg/config/types"
types "github.com/rancher/k3d/v5/pkg/config/types"
)

const DefaultConfigApiVersion = defaultConfig.ApiVersion
@@ -59,7 +59,7 @@ func FromViper(config *viper.Viper) (types.Config, error) {
apiVersion := strings.ToLower(config.GetString("apiversion"))
kind := strings.ToLower(config.GetString("kind"))

log.Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind)
l.Log().Tracef("Trying to read config apiVersion='%s', kind='%s'", apiVersion, kind)

switch apiVersion {
case "k3d.io/v1alpha2":
@@ -73,13 +73,11 @@ func FromViper(config *viper.Viper) (types.Config, error) {
}

if err != nil {
return nil, err
return nil, fmt.Errorf("failed to parse config '%s': %w'", config.ConfigFileUsed(), err)
}

if err := config.Unmarshal(&cfg); err != nil {
log.Errorln("Failed to unmarshal File config")

return nil, err
return nil, fmt.Errorf("failed to unmarshal config file '%s': %w", config.ConfigFileUsed(), err)
}

return cfg, nil
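The FromViper flow above dispatches on the config file's apiVersion before unmarshalling into the matching schema. A runnable sketch of the same dispatch against an in-memory viper instance; `SimpleConfig` is a stand-in for k3d's versioned config types:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

type SimpleConfig struct {
	Kind    string `mapstructure:"kind"`
	Servers int    `mapstructure:"servers"`
}

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(strings.NewReader(
		"apiVersion: k3d.io/v1alpha3\nkind: Simple\nservers: 3\n")); err != nil {
		panic(err)
	}

	// viper keys are case-insensitive, so "apiversion" matches "apiVersion"
	switch strings.ToLower(v.GetString("apiversion")) {
	case "k3d.io/v1alpha2":
		fmt.Println("would migrate the old schema before unmarshalling")
	case "k3d.io/v1alpha3":
		var cfg SimpleConfig
		if err := v.Unmarshal(&cfg); err != nil {
			panic(fmt.Errorf("failed to unmarshal config file '%s': %w", v.ConfigFileUsed(), err))
		}
		fmt.Printf("parsed kind %s with %d servers\n", cfg.Kind, cfg.Servers)
	default:
		fmt.Println("unknown apiVersion")
	}
}
```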