Compare commits
No commits in common. "main" and "main-v3" have entirely different histories.
@@ -112,33 +112,6 @@
"contributions": [
"doc"
]
},
{
"login": "searsaw",
"name": "Alex Sears",
"avatar_url": "https://avatars.githubusercontent.com/u/3712883?v=4",
"profile": "http://www.alexsears.com",
"contributions": [
"doc"
]
},
{
"login": "Shanduur",
"name": "Mateusz Urbanek",
"avatar_url": "https://avatars.githubusercontent.com/u/32583062?v=4",
"profile": "http://shanduur.github.io",
"contributions": [
"code"
]
},
{
"login": "benjaminjb",
"name": "Benjamin Blattberg",
"avatar_url": "https://avatars.githubusercontent.com/u/4651855?v=4",
"profile": "https://github.com/benjaminjb",
"contributions": [
"code"
]
}
],
"contributorsPerLine": 7,
.drone.yml (490 changes)

@@ -14,7 +14,7 @@ platform:
steps:

- name: lint
image: golang:1.17
image: golang:1.14
commands:
- make ci-setup
- make check-fmt lint
@@ -25,7 +25,7 @@ steps:
- tag

- name: test
image: docker:20.10
image: docker:19.03
volumes:
- name: dockersock
path: /var/run
@@ -40,7 +40,7 @@ steps:
- tag

- name: build
image: golang:1.17
image: golang:1.14
environment:
GIT_TAG: "${DRONE_TAG}"
commands:
@@ -50,6 +50,10 @@ steps:
- lint
- test
when:
branch:
- main
- main-v* # major version branches
- release-*
event:
- push
- tag
@@ -73,12 +77,10 @@ steps:
- tag
ref:
include:
# include only pre-release tags
- "refs/tags/*rc*"
- "refs/tags/*beta*"
- "refs/tags/*alpha*"
- "refs/tags/*test*"
- "refs/tags/*dev*"

- name: release
image: plugins/github-release
@@ -98,17 +100,59 @@ steps:
- tag
ref:
exclude:
# exclude pre-release tags
- "refs/tags/*rc*"
- "refs/tags/*beta*"
- "refs/tags/*alpha*"
- "refs/tags/*test*"
- "refs/tags/*dev*"

- name: docker_build_push_dind
image: plugins/docker
settings:
repo: rancher/k3d
tags:
- latest-dind
- "${DRONE_TAG}-dind"
dockerfile: Dockerfile
target: dind
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
depends_on:
- lint
- test
- build
when:
event:
- tag

- name: docker_build_push_binary
image: plugins/docker
settings:
repo: rancher/k3d
tags:
- latest
- "${DRONE_TAG}"
dockerfile: Dockerfile
target: dind
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
depends_on:
- lint
- test
- build
when:
event:
- tag

services:
# Starting the docker service to be used by dind
- name: docker
image: docker:20.10-dind
image: docker:19.03-dind
privileged: true
volumes:
- name: dockersock
@@ -118,29 +162,14 @@ volumes:
- name: dockersock
temp: {}

---
###########################
###### Docker Images ######
###########################
#
# +++ Docker Images +++
# Tagged using the auto_tag feature of the docker plugin
# See http://plugins.drone.io/drone-plugins/drone-docker/#autotag
# > if event type is `tag`
# > > 1.0.0 produces docker tags 1, 1.0, 1.0.0
# > > 1.0.0-rc.1 produces docker tags 1.0.0-rc.1
# > if event type is `push` and target branch == default branch (main)
# > > tag `latest`

################################
##### Docker Images: amd64 #####
################################
#########################
##### Documentation #####
#########################

kind: pipeline
type: docker
name: linux_amd64
name: docs

platform:
os: linux
@@ -148,99 +177,93 @@ platform:

steps:

- name: build_push_binary
environment:
DOCKER_BUILDKIT: "1"
image: plugins/docker
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: linux-amd64
dockerfile: Dockerfile
target: binary-only
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}
- name: build
image: python:3.9
commands:
- python3 -m pip install -r docs/requirements.txt
- mkdocs build --verbose --clean --strict
when:
branch:
- main
event:
- push

- name: build_push_dind
image: plugins/docker
environment:
DOCKER_BUILDKIT: "1"
- name: publish
image: plugins/gh-pages
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: dind-linux-amd64
dockerfile: Dockerfile
target: dind
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}
- ARCH=amd64
from_secret: github_token
username: rancherio-gh-m
pages_directory: site/
target_branch: gh-pages
when:
branch:
- main
event:
- push

- name: build_push_proxy
trigger:
event:
- push
branch:
- main

---
#####################
##### k3d-proxy #####
#####################

kind: pipeline
type: docker
name: proxy_linux_amd64

platform:
os: linux
arch: amd64

steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-proxy
auto_tag: true
auto_tag_suffix: linux-amd64
tags:
- latest-linux-amd64
- "${DRONE_TAG}-linux-amd64"
dockerfile: proxy/Dockerfile
context: proxy/
username:
from_secret: docker_username
password:
from_secret: docker_password

- name: build_push_tools
image: plugins/docker
settings:
repo: rancher/k3d-tools
auto_tag: true
auto_tag_suffix: linux-amd64
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag

trigger:
event:
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
- tag

depends_on:
- main

---

################################
##### Docker Images: arm #####
################################

kind: pipeline
type: docker
name: linux_arm
name: proxy_linux_arm

platform:
os: linux
arch: arm

steps:

- name: build_push_proxy
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-proxy
auto_tag: true
auto_tag_suffix: linux-arm
tags:
- latest-linux-arm
- "${DRONE_TAG}-linux-arm"
dockerfile: proxy/Dockerfile
context: proxy/
username:
@@ -249,87 +272,35 @@ steps:
from_secret: docker_password
build_args:
- ARCH=arm

- name: build_push_tools
image: plugins/docker
settings:
repo: rancher/k3d-tools
auto_tag: true
auto_tag_suffix: linux-arm
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag

trigger:
event:
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
- tag

depends_on:
- main

---

################################
##### Docker Images: arm64 #####
################################

kind: pipeline
type: docker
name: linux_arm64
name: proxy_linux_arm64

platform:
os: linux
arch: arm64

steps:

- name: build_push_binary
environment:
DOCKER_BUILDKIT: "1"
image: plugins/docker
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: linux-arm64
dockerfile: Dockerfile
target: binary-only
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}

- name: build_push_dind
image: plugins/docker
environment:
DOCKER_BUILDKIT: "1"
settings:
repo: rancher/k3d
auto_tag: true
auto_tag_suffix: dind-linux-arm64
dockerfile: Dockerfile
target: dind
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}
- ARCH=arm64

- name: build_push_proxy
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-proxy
auto_tag: true
auto_tag_suffix: linux-arm64
tags:
- latest-linux-arm64
- "${DRONE_TAG}-linux-arm64"
dockerfile: proxy/Dockerfile
context: proxy/
username:
@@ -338,94 +309,195 @@ steps:
from_secret: docker_password
build_args:
- ARCH=arm64

- name: build_push_tools
image: plugins/docker
settings:
repo: rancher/k3d-tools
auto_tag: true
auto_tag_suffix: linux-arm64
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag

trigger:
event:
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
- tag

depends_on:
- main

---

##############################
###### Docker Manifests ######
##############################
kind: pipeline
type: docker
name: manifests
name: proxy_manifest

platform:
os: linux
arch: amd64

steps:
- name: push_manifest_binary
- name: push_manifest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
spec: manifest.tmpl
auto_tag: true
ignore_missing: true # expected, as we dropped arm due to missing base image for that arch

- name: push_manifest_dind
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
spec: dind-manifest.tmpl
auto_tag: true
ignore_missing: true # expected, as we dropped arm due to missing base image for that arch

- name: push_manifest_proxy
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
spec: proxy/manifest.tmpl
auto_tag: true
ignore_missing: false

- name: push_manifest_tools
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
spec: tools/manifest.tmpl
auto_tag: true
ignore_missing: false
target: "rancher/k3d-proxy:${DRONE_TAG}"
template: "rancher/k3d-proxy:${DRONE_TAG}-OS-ARCH"
platforms:
- linux/amd64
- linux/arm
- linux/arm64
when:
event:
- tag

trigger:
event:
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the manifest plugin
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
- tag

depends_on:
- main
- linux_amd64
- linux_arm
- linux_arm64
- proxy_linux_amd64
- proxy_linux_arm
- proxy_linux_arm64

---
#####################
##### k3d-tools #####
#####################

kind: pipeline
type: docker
name: tools_linux_amd64

platform:
os: linux
arch: amd64

steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-tools
tags:
- latest-linux-amd64
- "${DRONE_TAG}-linux-amd64"
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag

trigger:
event:
- tag

depends_on:
- main

---

kind: pipeline
type: docker
name: tools_linux_arm

platform:
os: linux
arch: arm

steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-tools
tags:
- latest-linux-arm
- "${DRONE_TAG}-linux-arm"
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag

trigger:
event:
- tag

depends_on:
- main
---

kind: pipeline
type: docker
name: tools_linux_arm64

platform:
os: linux
arch: arm64

steps:
- name: build_push
image: plugins/docker
settings:
repo: rancher/k3d-tools
tags:
- latest-linux-arm64
- "${DRONE_TAG}-linux-arm64"
dockerfile: tools/Dockerfile
context: tools/
username:
from_secret: docker_username
password:
from_secret: docker_password
when:
event:
- tag

trigger:
event:
- tag

depends_on:
- main
---

kind: pipeline
type: docker
name: tools_manifest

platform:
os: linux
arch: amd64

steps:
- name: push_manifest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
target: "rancher/k3d-tools:${DRONE_TAG}"
template: "rancher/k3d-tools:${DRONE_TAG}-OS-ARCH"
platforms:
- linux/amd64
- linux/arm
- linux/arm64
when:
event:
- tag

trigger:
event:
- tag

depends_on:
- main
- tools_linux_amd64
- tools_linux_arm
- tools_linux_arm64
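The pipeline comments above describe how the docker plugin's `auto_tag` option turns git tags into SemVer-style image tags. As a rough, hedged illustration only (the concrete tags and image names below are examples and not guaranteed to exist as published releases):

```bash
# auto_tag mapping as described in the pipeline comments above:
#   git tag 1.0.0      -> image tags 1, 1.0, 1.0.0
#   git tag 1.0.0-rc.1 -> image tag  1.0.0-rc.1
#   push to main       -> image tag  latest
#
# This is what lets users track a whole major-version line, e.g.:
docker pull rancher/k3d-proxy:4   # latest 4.x.x proxy image (example tag)
```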
.github/ISSUE_TEMPLATE/question_help.md (22 changes)

@@ -1,22 +0,0 @@
---
name: Question or Help Wanted
about: Get answers, receive Help.
title: "[QUESTION/HELP] "
labels: question
assignees: ''

---

<!--
In general, please consider using GitHub Discussions for questions and general discussions: https://github.com/rancher/k3d/discussions .
Especially please use Discussions for questions around use cases for k3d, etc.
For everything else, fire away :)
-->

## Question / Where do you need Help?

## Scope of your Question

- Is your question related to a specific version of k3d (or k3s)?
- Please paste the output of `k3d version` here
.github/pull_request_template.md (24 changes)

@@ -1,24 +0,0 @@
<!--
Hi there, have an early THANK YOU for your contribution!
k3d is a community-driven project, so we really highly appreciate any support.
Please make sure, you've read our Code of Conduct and the Contributing Guidelines :)
- Code of Conduct: https://github.com/rancher/k3d/blob/main/CODE_OF_CONDUCT.md
- Contributing Guidelines: https://github.com/rancher/k3d/blob/main/CONTRIBUTING.md
-->

# What

<!-- What does this PR do or change? -->

# Why

<!-- Link issues, discussions, etc. or just explain why you're creating this PR -->

# Implications

<!--
Does this change existing behavior? If so, does it affect the CLI (cmd/) only or does it also/only change some internals of the Go module (pkg/)?
Especially mention breaking changes here!
-->

<!-- Get recognized using our all-contributors bot: https://github.com/rancher/k3d/blob/main/CONTRIBUTING.md#get-recognized -->
.github/workflows/aur-prerelease.yml (4 changes)

@@ -6,8 +6,8 @@ on:

jobs:
aur-pre-release:
runs-on: ubuntu-20.04
container: archlinux:base-20210228.0.16308
runs-on: ubuntu-latest
container: archlinux:20200705
steps:
- name: Checkout Project
uses: actions/checkout@v1
.github/workflows/aur-release.yml (4 changes)

@@ -6,8 +6,8 @@ on:

jobs:
aur-release:
runs-on: ubuntu-20.04
container: archlinux:base-20210228.0.16308
runs-on: ubuntu-latest
container: archlinux:20200705
steps:
- name: Checkout Project
uses: actions/checkout@v1
.github/workflows/docs.yml (46 changes)

@@ -1,46 +0,0 @@
name: k3d.io

on:
push:
branches:
- main
tags:
# only run on tags for real releases and special docs releases
- 'v[0-9]+.[0-9]+.[0-9]+'
- 'v[0-9]+.[0-9]+.[0-9]+-docs.[0-9]+'
# tags-ignore:
# - "*rc*"
# - "*beta*"
# - "*alpha*"
# - "*test*"
# - "*dev*"

jobs:
build:
runs-on: ubuntu-20.04
container:
image: python:3.9
steps:
- name: Checkout Project
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Requirements
run: pip install -r docs/requirements.txt
- name: Build with MkDocs (validation)
run: |
mkdocs build --verbose --clean --strict
rm -r site/
- name: Configure Git
if: startsWith(github.ref, 'refs/tags/')
id: git
run: |
git config --global user.name ghaction-k3d.io
git config --global user.email ghaction@k3d.io
echo ::set-output name=tag::${GITHUB_REF#refs/tags/}
- name: Build & Deploy with Mike (versioned)
if: startsWith(github.ref, 'refs/tags/')
run: |
mike deploy --update-aliases --push --rebase ${{ steps.git.outputs.tag }} stable
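The workflow above validates the docs with MkDocs and then publishes a versioned build with mike. A minimal sketch of running the same steps locally, using the commands from the workflow itself (the tag `v5.0.0` is only an example value standing in for the git tag the workflow derives):

```bash
# Rough local equivalent of the docs workflow steps above.
pip install -r docs/requirements.txt
mkdocs build --verbose --clean --strict                      # validation build, same flags as CI
mike deploy --update-aliases --push --rebase v5.0.0 stable   # versioned deploy (example tag)
```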
.gitignore (5 changes)

@@ -22,7 +22,4 @@ site/
.vscode/
.local/
.idea/
*.iml

# Pipenv
Pipfile*
*.iml

@@ -1,3 +0,0 @@
linters-settings:
errcheck:
check-blank: false # to keep `_ = viper.BindPFlag(...)` from throwing errors
CHANGELOG.md (470 changes)

@@ -1,470 +0,0 @@
# Changelog

## v5.0.3

### Enhancements & Fixes

- simplified way of getting a Docker API Client that works with Docker Contexts and `DOCKER_*` environment variable configuration (#829, @dragonflylee)
- fix: didn't honor `DOCKER_TLS` environment variables before

## v5.0.2

### Enhancements

- CoreDNS Configmap is now edited in the auto-deploy manifest on disk instead of relying on `kubectl patch` command (#814)
- refactor: add cmd subcommands in a single function call (#819, @moeryomenko)
- handle ready-log-messages by type and intent & check them in single log streams instead of checking whole chunks every time (#818)

### Fixes

- fix: config file check failing with env var expansion because unexpanded input file was checked

### Misc

- cleanup: ensure that connections/streams are closed once unused (#818)
- cleanup: split type definitions across multiple files to increase readability (#818)
- docs: clarify `node create` help text about cluster reference (#808, @losinggeneration)
- refactor: move from io/ioutil (deprecated) to io and os packages (#827, @Juneezee)

## v5.0.1

### Enhancement

- add `HostFromClusterNetwork` field to `LocalRegistryHosting` configmap as per KEP-1755 (#754)

### Fixes

- fix: nilpointer exception on failed exec process with no returned logreader
- make post-create cluster preparation (DNS stuff mostly) more resilient (#780)
- fix v1alpha2 -> v1alpha3 config migration (and other related issues) (#799)

### Misc

- docs: fix typo (#784)
- docs: fix usage of legacy `--k3s-agent/server-arg` flag

## v5.0.0

This release contains a whole lot of new features, breaking changes as well as smaller fixes and improvements.
The changelog shown here is likely not complete but gives a broad overview over the changes.
For more details, please check the v5 milestone (<https://github.com/rancher/k3d/milestone/27>) or even the commit history.
The docs have been updated, so you should also find the information you need there, with more to come!

The demo repository has also been updated to work with k3d v5: <https://github.com/iwilltry42/k3d-demo>.

**Info**: <https://k3d.io> is now versioned, so you can checkout different versions of the documentation by using the dropdown menu in the page title bar!

**Feedback welcome!**

### Breaking Changes

- new syntax for nodefilters
- dropped the usage of square brackets `[]` for indexing, as it caused problems with some shells trying to interpret them
- new syntax: `@identifier[:index][:opt]` (see <https://github.com/rancher/k3d/discussions/652>)
- example for a port-mapping: `--port 8080:80@server:0:proxy`
- identifier = `server`, index = `0`, opt = `proxy`
- `opt` is an extra optional argument used for different purposes depending on the flag
- currently, only the `--port` flag has `opt`s, namely `proxy` and `direct` (see other breaking change)
- port-mappings now go via the loadbalancer (serverlb) by default
- the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default
- to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag
- the nodefilter `loadbalancer` will now do the same as `servers:*;agents:*` (proxied via the loadbalancer)
- flag `--registries-create` transformed from bool flag to string flag: lets you define the name and port-binding of the newly created registry, e.g. `--registry-create myregistry.localhost:5001`

### Fixes

- cleaned up and properly sorted the sanitization of existing resources used to create new nodes (#638)

### Features & Enhancements

- new command: `k3d node edit` to edit existing nodes (#615)
- currently only allows `k3d node edit NODE --port-add HOSTPORT:CONTAINERPORT` for the serverlb/loadbalancer to add new ports
- pkg: new `NodeEdit` function
- new (hidden) command: `k3d debug` with some options for debugging k3d resources (#638)
- e.g. `k3d debug loadbalancer get-config` to get the current loadbalancer configuration
- loadbalancer / k3d-proxy (#638)
- updated fork of `confd` to make usage of the file backend including a file watcher for auto-reloads
- this also checks the config before applying it, so the lb doesn't crash on a faulty config
- updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards
- some settings of the loadbalancer can now be configured using `--lb-config-override`, see docs at <https://k3d.io/v5.0.0/design/defaults/#k3d-loadbalancer>
- helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638)
- concurrently add new nodes to an existing cluster (remove some dumb code) (#640)
- `--wait` is now the default for `k3d node create`
- normalized flag usage for k3s and runtime (#598, @ejose19)
- rename `k3d cluster create --label` to `k3d cluster create --runtime-label` (as it's labelling the node on runtime level, e.g. docker)
- config option moved to `options.runtime.labels`
- add `k3d cluster create --k3s-node-label` to add Kubernetes node labels via k3s flag (#584, @developer-guy, @ejose, @dentrax)
- new config option `options.k3s.nodeLabels`
- the same for `k3d node create`
- improved config file handling (#605)
- new version `v1alpha3`
- warning when using outdated version
- validation dynamically based on provided config apiVersion
- new default for `k3d config init`
- new command `k3d config migrate INPUT [OUTPUT]` to migrate config files between versions
- currently supported migration `v1alpha2` -> `v1alpha3`
- pkg: new `Config` interface type to support new generic `FromViper` config file parsing
- changed flags `--k3s-server-arg` & `--k3s-agent-arg` into `--k3s-arg` with nodefilter support (#605)
- new config path `options.k3s.extraArgs`
- config file: environment variables (`$VAR`, `${VAR}` will be expanded unconditionally) (#643)
- docker context support (#601, @developer-guy & #674)
- Feature flag using the environment variable `K3D_FIX_DNS` and setting it to a true value (e.g. `export K3D_FIX_DNS=1`) to forward DNS queries to your local machine, e.g. to use your local company DNS

### Misc

- tests/e2e: timeouts everywhere to avoid killing DroneCI (#638)
- logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640)
- tests/e2e: add tests for v1alpha2 to v1alpha3 migration
- docs: use v1alpha3 config version
- docs: update general appearance and cleanup

## v4.4.8

### Enhancements

- Improved DroneCI Pipeline for Multiarch Images and SemVer Tags (#712)
- **Important**: New images will not have the `v` prefix in the tag anymore!
- but now real releases will use the "hierarchical" SemVer tags, so you could e.g. subscribe to rancher/k3d-proxy:4 to get v4.x.x images for the proxy container

### Fixes

- clusterCreate: do not override hostIP if hostPort is missing (#693, @lukaszo)
- imageImport: import all listed images, not only the first one (#701, @mszostok)
- clusterCreate: when memory constraints are set, only pull the image used for checking the edac folder, if it's not present on the machine
- fix: update k3d-tools dependencies and use API Version Negotiation, so it still works with older versions of the Docker Engine (#679)

### Misc

- install script: add darwin/arm64 support (#676, @colelawrence)
- docs: fix go install command (#677, @Rots)
- docs: add project overview (<https://k3d.io/internals/project/>) (#680)

## v4.4.7

### Features / Enhancements

- new flag: `k3d image import --keep-tools` to not delete the tools node container after importing the image(s) (#672)
- improve image name handling when importing images (#653, @cimnine)
- normalize image names internally, e.g. strip prefixes that docker adds, but that break the process
- see <https://k3d.io/usage/commands/k3d_image_import/> for more info

### Fixes

- Use default gateway, when bridge network doesn't have it (#666, @kuritka)
- Start an existing, but not running tools node to re-use it when importing an image (#672)

### Misc

- deps: switching back to upstream viper including the StringArray fix
- docs: reference to "nolar/setup-k3d-k3s" step for GitHub Actions (#668, @nolar)
- docs: updated and simplified CUDA guide (#662, @vainkop) (#669)

## v4.4.6

### Fixes

- fix an issue where the cluster creation would stall waiting for the `starting worker processes` log message from the loadbalancer/serverlb
- this was likely caused by a rounding issue when asking docker to get the container logs starting at a specific timestamp
- we now drop subsecond precision for this to avoid the rounding issue, which was confirmed to work
- see issues #592 & #621

### Misc

- to debug the issue mentioned above, we introduced a new environment variable `K3D_LOG_NODE_WAIT_LOGS`, which can be set to a list of node roles (e.g. `K3D_LOG_NODE_WAIT_LOGS=loadbalancer,agent`) to output the container logs that k3d inspects

## v4.4.5

### Fixes

- overall: use the getDockerClient helper function everywhere to e.g. support docker via ssh everywhere
- nodeCreate: do not copy meminfo/edac volume mounts from existing nodes, to avoid conflicts with generated mounts
- kubeconfig: fix file handling on windows (#626 + #628, @dragonflylee)

### Misc

- docs: add [FAQ entry](https://k3d.io/faq/faq/#nodes-fail-to-start-or-get-stuck-in-notready-state-with-log-nf_conntrack_max-permission-denied) on nf_conntrack_max: permission denied issue from kube-proxy (#607)
- docs: cleanup, fix formatting, etc.
- license: update to include 2021 in time range
- docs: link to AutoK3s (#614, @JacieChao)
- tests/e2e: update the list of tested k3s versions

## v4.4.4

### Enhancements

- nodes created via `k3d node create` now inherit the registry config from existing nodes (if there is any) (#597)
- the cgroupv2 hotfix (custom entrypoint script) is now enabled by default (#603)
- disable by setting the environment variable `K3D_FIX_CGROUPV2=false`

### Fixes

- fix using networks without IPAM config (e.g. `host`)

### Misc

- docs: edit links on k3d.io now point to the correct branch (`main`)
- docs: new FAQ entry on spurious PID entries when using shared mounts (#609, @leelavg)

## v4.4.3

### Highlights

- cgroupv2 support: to properly work on cgroupv2 systems, k3s has to move all the processes from the root cgroup to a new /init cgroup and enable subtree_control
- this is going to be included in the k3s agent code directly (<https://github.com/k3s-io/k3s/pull/3242>)
- for now we're overriding the container entrypoint with a script that does this (#579, compare <https://github.com/k3s-io/k3s/pull/3237>)
- thanks a lot for all the input and support @AkihiroSuda
- **Usage**: set the environment variable `K3D_FIX_CGROUPV2` to a `true` value before/when creating a cluster with k3d
- e.g. `export K3D_FIX_CGROUPV2=1`

### Fixes

- fix: docker volume not mountable due to validation failure
- was not able to mount named volume on windows as we're checking for `:` meant for drive-letters and k3d separators

### Misc

- fix create command's flags typo (#568, @Jason-ZW)

## v4.4.2

### Fixes

- k3d-proxy: rename udp upstreams to avoid collisions/duplicates (#564)

### Features

- add *hidden* command `k3d runtime-info` used for debugging (#553)
- this comes with some additions on package/runtime level
- add *experimental* `--subnet` flag to get some k3d IPAM to ensure that server nodes keep static IPs across restarts (#560)

### Misc

- docs: fix typo (#556, @gcalmettes)
- docs: fix typo (#561, @alechartung)
- ci/drone: pre-release on `-dev.X` tags
- ci/drone: always build no matter the branch name (just not release)
- docs: add automatic command tree generation via cobra (#562)
- makefile: use `go env gopath` as install target for tools (as per #445)
- JSONSchema: add some examples and defaults (now also available via <https://raw.githubusercontent.com/rancher/k3d/main/pkg/config/v1alpha2/schema.json> in your IDE)

## v4.4.1

### Fixes

- use viper fork that contains a fix to make cobra's `StringArray` flags work properly
- this fixes the issue, that flag values containing commas got split (because we had to use `StringSlice` type flags)
- this is to be changed back to upstream viper as soon as <https://github.com/spf13/viper/pull/398> (or a similar fix) got merged

## v4.4.0

### Features / Enhancements

- Support for Memory Limits using e.g. `--servers-memory 1g` or `--agents-memory 1.5g` (#494, @konradmalik)
- enabled by providing fake `meminfo` files

### Fixes

- fix absolute paths in volume mounts on Windows (#510, @markrexwinkel)

### Documentation

- clarify registry names in docs and help text
- add usage section about config file (#534)
- add FAQ entry on certificate error when running behind corporate proxy
- add MacPorts install instructions (#539, @herbygillot)
- Heal Shruggie: Replace amputated arm (#540, @claycooper)

## v4.3.0

### Features / Enhancements

- Use Go 1.16
- update dependencies, including kubernetes, docker, containerd and more
- add `darwin/arm64` (Apple Silicon, M1) build target (#530)
- use the new `//go:embed` feature to directly embed the jsonschema in the binary (#529)
- Add a status column to `k3d registry list` output (#496, @ebr)
- Allow non-prefixed (i.e. without `k3d-` prefix) user input when fetching resources (e.g. `k3d node get mycluster-server-0` would return successfully)

### Fixes

- Allow absolute paths for volumes on Windows (#510, @markrexwinkel)
- fix nil-pointer exception in case of non-existent IPAM network config
- Properly handle combinations of host/hostIP in kubeAPI settings reflected in the kubeconfig (#500, @fabricev)

### Misc

- docs: fix typo in stop command help text (#513, @searsaw)
- ci/ghaction: AUR (pre-)release now on Ubuntu 20.04 and latest archlinux image
- REMOVE incomplete and unused `containerd` runtime from codebase, as it was causing issues to build for windows and hasn't made any progress in quite some time now

## v4.2.0

### Features / Enhancements

- add processing step for cluster config, to configure it e.g. for hostnetwork mode (#477, @konradmalik)
- allow proxying UDP ports via the load balancer (#488, @k0da)

### Fixes

- fix usage of `DOCKER_HOST` env var for Kubeconfig server ref (trim port)
- fix error when trying to attach the same node (e.g. registry) to the same network twice (#486, @kuritka)
- fix Kube-API settings in config file got overwritten (#490, @dtomasi)

### Misc

- add `k3d.version` label to created resources
- add Pull-Request template
- docs: add hint on minimal requirements for multi-server clusters (#481, @Filius-Patris)

## v4.1.1

### Fixes

- fix: `--k3s-server-arg` and `--k3s-agent-arg` didn't work (Viper StringArray incompatibility) (#482)

## v4.1.0

### Highlights

#### :scroll: Configuration Enhancements

- :snake: use [viper](https://github.com/spf13/viper) for configuration management
- takes over the job of properly fetching and merging config options from
- CLI arguments/flags
- environment variables
- config file
- this also fixes some issues with using the config file (like cobra defaults overriding config file values)
- :heavy_check_mark: add JSON-Schema validation for the `Simple` config file schema
- :new: config version `k3d.io/v1alpha2` (some naming changes)
- `exposeAPI` -> `kubeAPI`
- `options.k3d.noRollback` -> `options.k3d.disableRollback`
- `options.k3d.prepDisableHostIPInjection` -> `options.k3d.disableHostIPInjection`

#### :computer: Docker over SSH

- Support Docker over SSH (#324, @ekristen & @inercia)

### Features & Enhancements

- add root flag `--timestamps` to enable timestamped logs
- improved multi-server cluster support (#467)
- log a warning, if one tries to create a cluster with only 2 nodes (no majority possible, no fault tolerance)
- revamped cluster start procedure: init-node, sorted servers, agents, helpers
- different log messages per role and start-place (that we wait for to consider a node to be ready)
- module: `NodeStartOpts` now accept a `ReadyLogMessage` and `NodeState` now takes a `Started` timestamp string

### Fixes

- do not ignore `--no-hostip` flag and don't inject hostip if `--network=host` (#471, @konradmalik)
- fix: `--no-lb` ignored
- fix: print error cause when serverlb fails to start

### Misc

- tests/e2e: add config override test
- tests/e2e: add multi server start-stop cycle test
- tests/e2e: improved logs with stage and test details.
- builds&tests: use Docker 20.10 and BuildKit everywhere
- :memo: docs: add <https://github.com/AbsaOSS/k3d-action> (GitHub Action) as a related project (#476, @kuritka)

### Tested with

- E2E Tests ran with k3s versions
- v1.17.17-k3s1 (see Known Issues below)
- v1.18.15-k3s1 (see Known Issues below)
- v1.19.7-k3s1
- v1.20.2-k3s1

### Known Issues

- automatic multi-server cluster restarts tend to fail with k3s versions v1.17.x & v1.18.x and probably earlier versions (using dqlite)
- Using Viper brings us lots of nice features, but also one problem:
- We had to switch StringArray flags to StringSlice flags, which
- allow to use multiple flag values comma-separated in a single flag, but also
- split flag values that contain a comma into separate parts (and we cannot handle issues that arise due to this)
- so if you rely on commas in your flag values (e.g. for `--env X=a,b,c`), please consider filing an issue or supporting <https://github.com/spf13/viper/issues/246> and <https://github.com/spf13/viper/pull/398>
- `--env X=a,b,c` would be treated the same as `--env X=a`, `--env b`, `--env c`

## v4.0.0

### Breaking Changes

#### Module

**If you're using k3d as a Go module, please have a look into the code to see all the changes!**

- We're open for chats via Slack or GitHub discussions

- Module is now on `github.com/rancher/k3d/v4` due to lots of breaking changes
- `pkg/cluster` is now `pkg/client`
- `ClusterCreate` and `NodeCreate` don't start the entities (containers) anymore
- `ClusterRun` and `NodeRun` orchestrate the new Create and Start functionality
- `NodeDelete`/`ClusterDelete` now take an additional `NodeDeleteOpts`/`ClusterDeleteOpts` struct to toggle specific steps
- NodeSpec now features a list of networks (required for registries)
- New config flow: CLIConfig (SimpleConfig) -> ClusterConfig -> Cluster + Opts

#### CLI

- Some flags changed to also use `noun-action` syntax
- e.g. `--switch-context --update-default-kubeconfig` -> `--kubeconfig-switch-context --kubeconfig-update-default`
- this eases grouping and visibility

### Changes

#### Features

- **Registry Support**
- k3d-managed registry like we had it in k3d v1.x
- Option 1: default settings, paired with cluster creation
- `k3d cluster create --registry-create` -> New registry for that cluster
- `k3d cluster create --registry-use` -> Re-use existing registry
- Option 2: customized, managed stand-alone
- `k3d registry [create/start/stop/delete]`
- Check the documentation, help text and tutorials for more details
- Communicate managed registry using the LocalRegistryHostingV1 spec from [KEP-1755](https://github.com/kubernetes/enhancements/blob/0d69f7cea6fbe73a7d70fab569c6898f5ccb7be0/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry/README.md)
- interesting especially for tools that reload images, like Tilt or Skaffold

- **Config File Support**
- Put all your CLI-Arguments/Flags into a more readable config file and re-use it everywhere (keep it in your repo)
- Note: this is not always a 1:1 matching in naming/syntax/semantics
- `k3d cluster create --config myconfig.yaml`

```yaml
apiVersion: k3d.io/v1alpha1
kind: Simple
name: mycluster
servers: 3
agents: 2
ports:
- port: 8080:80
nodeFilters:
- loadbalancer
```

- Check out our test cases in [pkg/config/test_assets/](./pkg/config/test_assets/) for more config file examples
- **Note**: The config file format (& feature) might still be a little rough around the edges and it's prone to change quickly until we hit a stable release of the config

- [WIP] Support for Lifecycle Hooks
- Run any executable at specific stages during the cluster and node lifecycles
- e.g. we modify the `registries.yaml` in the `preStart` stage of nodes
- Guides will follow

- Print container creation time (#431, @inercia)
- add output formats for `cluster ls` and `node ls` (#439, @inercia)

#### Fixes

- import image: avoid nil pointer exception in specific cases
- cluster delete: properly handle node and network (#437)
- --port: fix nil-pointer exception when exposing port on non-existent loadbalancer
- completion/zsh: source completion file

#### Misc

- Now building with Go 1.15
- same for the k3d-tools code
- updated dependencies (including Docker v20.10)
- tests/e2e: add `E2E_INCLUDE` and rename `E2E_SKIP` to `E2E_EXCLUDE`
- tests/e2e: allow overriding the Helper Image Tag via `E2E_HELPER_IMAGE_TAG`
- docs: spell checking (#434, @jsoref)
- docs: add Chocolatey install option (#443, @erwinkersten)
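The v5 changelog entries above describe several syntax changes (the `@identifier[:index][:opt]` nodefilter, the string-valued `--registry-create`, the merged `--k3s-arg` flag, and `k3d config migrate`). A minimal hedged sketch combining them, where the cluster name, registry name/port, and file names are placeholders:

```bash
# Sketch of the v5 flag syntax described in the changelog above (names are placeholders).
k3d cluster create mycluster \
  --port 8080:80@loadbalancer \
  --registry-create myregistry.localhost:5001 \
  --k3s-arg "--disable=traefik@server:0"

# Migrate a v1alpha2 config file to v1alpha3 (input/output names are examples).
k3d config migrate old-config.yaml new-config.yaml
```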
@@ -7,8 +7,6 @@ We welcome everyone who likes to use and improve our software.

Before starting to work with and on k3d, please read and understand our [**Code of Conduct**](./CODE_OF_CONDUCT.md).

Get an Overview of the k3d project in the documentation: [k3d.io/internals/project](https://k3d.io/internals/project)

Before opening an issue or a Pull-Request, please use GitHub's search function to check whether something similar is already in process and hook in there instead.

## Get Recognized
Dockerfile (42 changes)

@@ -1,41 +1,15 @@
############################################################
# builder #
# -> golang image used solely for building the k3d binary #
# -> built executable can then be copied into other stages #
############################################################
FROM golang:1.17 as builder
ARG GIT_TAG_OVERRIDE
FROM golang:1.14 as builder
WORKDIR /app
COPY . .
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version
RUN make build && bin/k3d version

#######################################################
# dind #
# -> k3d + some tools in a docker-in-docker container #
# -> used e.g. in our CI pipelines for testing #
#######################################################
FROM docker:20.10-dind as dind
ARG OS=linux
ARG ARCH=amd64

# install some basic packages needed for testing, etc.
RUN echo "building for ${OS}/${ARCH}" && \
apk update && \
apk add bash curl sudo jq git make netcat-openbsd

# install kubectl to interact with the k3d cluster
RUN curl -L https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/${OS}/${ARCH}/kubectl -o /usr/local/bin/kubectl && \
chmod +x /usr/local/bin/kubectl

# install yq (yaml processor) from source, as the busybox yq had some issues
RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_${OS}_${ARCH} -o /usr/bin/yq &&\
chmod +x /usr/bin/yq
FROM docker:19.03-dind as dind
RUN apk update && apk add bash curl sudo jq git make netcat-openbsd
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \
chmod +x ./kubectl && \
mv ./kubectl /usr/local/bin/kubectl
COPY --from=builder /app/bin/k3d /bin/k3d

#########################################
# binary-only #
# -> only the k3d binary.. nothing else #
#########################################
FROM scratch as binary-only
COPY --from=builder /app/bin/k3d /bin/k3d
ENTRYPOINT ["/bin/k3d"]
ENTRYPOINT ["/bin/k3d"]
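The Dockerfile above defines `builder`, `dind`, and `binary-only` stages. A minimal sketch of building a single stage directly (the image tags are placeholders, and `GIT_TAG_OVERRIDE` is the build arg declared in the Dockerfile; the Makefile's `build-docker-%` rule shown further below does essentially the same thing):

```bash
# Build individual Dockerfile stages by target name (tags are examples only).
DOCKER_BUILDKIT=1 docker build . --target binary-only -t k3d:local-binary
DOCKER_BUILDKIT=1 docker build . --target dind -t k3d:local-dind --build-arg GIT_TAG_OVERRIDE=v5.0.0
```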
LICENSE (2 changes)

@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright © 2019-2021 Thorsten Klein <iwilltry42@gmail.com>
Copyright © 2019-2020 Thorsten Klein <iwilltry42@gmail.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Makefile (44 changes)

@@ -16,18 +16,13 @@ export GO111MODULE=on
########## Tags ##########

# get git tag
ifneq ($(GIT_TAG_OVERRIDE),)
$(info GIT_TAG set from env override!)
GIT_TAG := $(GIT_TAG_OVERRIDE)
endif

GIT_TAG ?= $(shell git describe --tags)
ifeq ($(GIT_TAG),)
GIT_TAG := $(shell git describe --always)
endif

# Docker image tag derived from Git tag (with prefix "v" stripped off)
K3D_IMAGE_TAG := $(GIT_TAG:v%=%)
# Docker image tag derived from Git tag
K3D_IMAGE_TAG := $(GIT_TAG)

# get latest k3s version: grep the tag and replace + with - (difference between git and dockerhub tags)
K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable" | egrep -o '/v[^ ]+"' | sed -E 's/\/|\"//g' | sed -E 's/\+/\-/')
@@ -46,26 +41,23 @@ REC_DIRS := cmd

########## Test Settings ##########
E2E_LOG_LEVEL ?= WARN
E2E_INCLUDE ?=
E2E_EXCLUDE ?=
E2E_SKIP ?=
E2E_EXTRA ?=
E2E_RUNNER_START_TIMEOUT ?= 10
E2E_HELPER_IMAGE_TAG ?=

########## Go Build Options ##########
# Build targets
TARGETS ?= darwin/amd64 darwin/arm64 linux/amd64 linux/386 linux/arm linux/arm64 windows/amd64
TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 darwin-arm64.tar.gz darwin-arm64.tar.gz.sha256 linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-386.tar.gz linux-386.tar.gz.sha256 linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm64.tar.gz linux-arm64.tar.gz.sha256 windows-amd64.zip windows-amd64.zip.sha256
TARGETS ?= darwin/amd64 linux/amd64 linux/386 linux/arm linux/arm64 windows/amd64
TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-386.tar.gz linux-386.tar.gz.sha256 linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm64.tar.gz linux-arm64.tar.gz.sha256 windows-amd64.zip windows-amd64.zip.sha256
K3D_HELPER_VERSION ?=

# Go options
GO ?= go
GOENVPATH := $(shell go env GOPATH)
PKG := $(shell go mod vendor)
TAGS :=
TESTS := ./...
TESTFLAGS :=
LDFLAGS := -w -s -X github.com/rancher/k3d/v5/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v5/version.K3sVersion=${K3S_TAG}
LDFLAGS := -w -s -X github.com/rancher/k3d/v3/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v3/version.K3sVersion=${K3S_TAG}
GCFLAGS :=
GOFLAGS :=
BINDIR := $(CURDIR)/bin
@@ -74,7 +66,7 @@ BINARIES := k3d
# Set version of the k3d helper images for build
ifneq ($(K3D_HELPER_VERSION),)
$(info [INFO] Helper Image version set to ${K3D_HELPER_VERSION})
LDFLAGS += -X github.com/rancher/k3d/v5/version.HelperVersionOverride=${K3D_HELPER_VERSION}
LDFLAGS += -X github.com/rancher/k3d/v3/version.HelperVersionOverride=${K3D_HELPER_VERSION}
endif

# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
@@ -84,7 +76,7 @@ GO_SRC += $(foreach dir,$(REC_DIRS),$(shell find $(dir) -name "*.go"))
########## Required Tools ##########
# Go Package required
PKG_GOX := github.com/mitchellh/gox@v1.0.1
PKG_GOLANGCI_LINT_VERSION := 1.39.0
PKG_GOLANGCI_LINT_VERSION := 1.28.3
PKG_GOLANGCI_LINT_SCRIPT := https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
PKG_GOLANGCI_LINT := github.com/golangci/golangci-lint/cmd/golangci-lint@v${PKG_GOLANGCI_LINT_VERSION}

@@ -103,7 +95,7 @@ LINT_DIRS := $(DIRS) $(foreach dir,$(REC_DIRS),$(dir)/...)

.PHONY: all build build-cross clean fmt check-fmt lint check extra-clean install-tools

all: clean fmt check test build
all: clean fmt check build

############################
########## Builds ##########
@@ -125,14 +117,14 @@ build-cross:
# build a specific docker target ( '%' matches the target as specified in the Dockerfile)
build-docker-%:
@echo "Building Docker image k3d:$(K3D_IMAGE_TAG)-$*"
DOCKER_BUILDKIT=1 docker build . -t k3d:$(K3D_IMAGE_TAG)-$* --target $*
docker build . -t k3d:$(K3D_IMAGE_TAG)-$* --target $*

# build helper images
build-helper-images:
@echo "Building docker image rancher/k3d-proxy:$(K3D_IMAGE_TAG)"
DOCKER_BUILDKIT=1 docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(K3D_IMAGE_TAG)
@echo "Building docker image rancher/k3d-tools:$(K3D_IMAGE_TAG)"
DOCKER_BUILDKIT=1 docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(K3D_IMAGE_TAG) --build-arg GIT_TAG=$(GIT_TAG)
@echo "Building docker image rancher/k3d-proxy:$(GIT_TAG)"
docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(GIT_TAG)
@echo "Building docker image rancher/k3d-tools:$(GIT_TAG)"
docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(GIT_TAG) --build-arg GIT_TAG=$(GIT_TAG)

##############################
########## Cleaning ##########
@@ -171,7 +163,7 @@ test:

e2e: build-docker-dind
@echo "Running e2e tests in k3d:$(K3D_IMAGE_TAG)"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_INCLUDE="$(E2E_INCLUDE)" E2E_EXCLUDE="$(E2E_EXCLUDE)" E2E_EXTRA="$(E2E_EXTRA)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) E2E_HELPER_IMAGE_TAG="$(E2E_HELPER_IMAGE_TAG)" tests/dind.sh "${K3D_IMAGE_TAG}-dind"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_SKIP="$(E2E_SKIP)" E2E_EXTRA="$(E2E_EXTRA)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) tests/dind.sh "${K3D_IMAGE_TAG}-dind"

ci-tests: fmt check e2e

@@ -197,13 +189,13 @@ ifndef HAS_GOX
($(GO) get $(PKG_GOX))
endif
ifndef HAS_GOLANGCI
(curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b $(GOENVPATH)/bin v${PKG_GOLANGCI_LINT_VERSION})
(curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b ${GOPATH}/bin v${PKG_GOLANGCI_LINT_VERSION})
endif
ifdef HAS_GOLANGCI
ifeq ($(HAS_GOLANGCI_VERSION),)
ifdef INTERACTIVE
@echo "Warning: Your installed version of golangci-lint (interactive: ${INTERACTIVE}) differs from what we'd like to use. Switch to v${PKG_GOLANGCI_LINT_VERSION}? [Y/n]"
@read line; if [ $$line == "y" ]; then (curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b $(GOENVPATH)/bin v${PKG_GOLANGCI_LINT_VERSION}); fi
@read line; if [ $$line == "y" ]; then (curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b ${GOPATH}/bin v${PKG_GOLANGCI_LINT_VERSION}); fi
else
@echo "Warning: you're not using the same version of golangci-lint as us (v${PKG_GOLANGCI_LINT_VERSION})"
endif
@@ -216,7 +208,7 @@ endif
# - kubectl for E2E-tests (e2e)
ci-setup:
@echo "Installing Go tools..."
curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b $(GOENVPATH)/bin v$(PKG_GOLANGCI_LINT_VERSION)
curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b ${GOPATH}/bin v$(PKG_GOLANGCI_LINT_VERSION)
$(GO) get $(PKG_GOX)

@echo "Installing kubectl..."
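The Makefile above wires the Docker targets and e2e tests together. A small hedged sketch of typical invocations, based only on the targets and variables visible in the diff (the `E2E_*` filter values are placeholders, not names of real test cases):

```bash
# Example invocations of the Makefile targets shown above (filter values are placeholders).
make build                                   # build the k3d binary into bin/
make build-docker-dind                       # "%" rule: builds the Dockerfile's "dind" target
make build-helper-images                     # builds rancher/k3d-proxy and rancher/k3d-tools locally
make e2e E2E_LOG_LEVEL=DEBUG E2E_INCLUDE=""  # run the e2e suite inside the dind image
```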
34
README.md
34
README.md
@ -4,16 +4,16 @@
[![License](https://img.shields.io/github/license/rancher/k3d?style=flat-square)](./LICENSE.md)
![Downloads](https://img.shields.io/github/downloads/rancher/k3d/total.svg?style=flat-square)

[![Go Module](https://img.shields.io/badge/Go%20Module-github.com%2Francher%2Fk3d%2Fv5-007d9c?style=flat-square&logo=go&logoColor=white)](https://pkg.go.dev/github.com/rancher/k3d/v5)
[![Go Module](https://img.shields.io/badge/Go%20Module-github.com%2Francher%2Fk3d%2Fv3-007d9c?style=flat-square&logo=go&logoColor=white)](https://pkg.go.dev/github.com/rancher/k3d/v3)
[![Go version](https://img.shields.io/github/go-mod/go-version/rancher/k3d?logo=go&logoColor=white&style=flat-square)](./go.mod)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3d?style=flat-square)](https://goreportcard.com/report/github.com/rancher/k3d)

<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
[![All Contributors](https://img.shields.io/badge/all_contributors-14-orange.svg?style=flat-square)](#contributors-)
[![All Contributors](https://img.shields.io/badge/all_contributors-11-orange.svg?style=flat-square)](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg)](code_of_conduct.md)

**Please Note:** `main` is now v5.0.0 and the code for v4.x can be found in the `main-v4` branch!
**Please Note:** `main` is now v3.0.0 and the code for v1.x can be found in the `main-v1` branch!

## [k3s in docker](https://k3d.io)

@ -21,7 +21,7 @@ k3s is the lightweight Kubernetes distribution by Rancher: [rancher/k3s](https:/

k3d creates containerized k3s clusters. This means, that you can spin up a multi-node k3s cluster on a single machine using docker.

[![asciicast](https://asciinema.org/a/436420.svg)](https://asciinema.org/a/436420)
[![asciicast](https://asciinema.org/a/347570.svg)](https://asciinema.org/a/347570)

## Learning

@ -35,9 +35,7 @@ k3d creates containerized k3s clusters. This means, that you can spin up a multi

## Releases

**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!
**Note**: In January 2021 we upgraded from v3.x.x to **v4.0.0** which includes some breaking changes!
**Note**: In September 2021 we upgraded from v4.4.8 to **v5.0.0** which includes some breaking changes!
**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!

| Platform | Stage | Version | Release Date | |
|-----------------|--------|---------|--------------|---|
@ -54,15 +52,14 @@ You have several options there:
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- use the install script to grab a specific release (via `TAG` environment variable):
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`

- use [Homebrew](https://brew.sh): `brew install k3d` (Homebrew is available for MacOS and Linux)
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
- install via [MacPorts](https://www.macports.org): `sudo port selfupdate && sudo port install k3d` (MacPorts is available for MacOS)
- install via [AUR](https://aur.archlinux.org/) package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/): `yay -S rancher-k3d-bin`
- grab a release from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself.
- install via go: `go install github.com/rancher/k3d@latest` (**Note**: this will give you unreleased/bleeding-edge changes)
- install via go: `go install github.com/rancher/k3d` (**Note**: this will give you unreleased/bleeding-edge changes)
- use [Chocolatey](https://chocolatey.org/): `choco install k3d` (Chocolatey package manager is available for Windows)
- package source can be found in [erwinkersten/chocolatey-packages](https://github.com/erwinkersten/chocolatey-packages/tree/master/automatic/k3d)

@ -70,7 +67,7 @@ or...

## Build

1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v5@main`
1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v3@main`
2. Inside the repo run
- 'make install-tools' to make sure required go packages are installed
3. Inside the repo run one of the following commands
@ -85,7 +82,7 @@ Check out what you can do via `k3d help` or check the docs @ [k3d.io](https://k3
Example Workflow: Create a new cluster and use it with `kubectl`

1. `k3d cluster create CLUSTER_NAME` to create a new single-node cluster (= 1 container running k3s + 1 loadbalancer container)
2. [Optional, included in cluster create] `k3d kubeconfig merge CLUSTER_NAME --kubeconfig-switch-context` to update your default kubeconfig and switch the current-context to the new one
2. `k3d kubeconfig merge CLUSTER_NAME --switch-context` to update your default kubeconfig and switch the current-context to the new one
3. execute some commands like `kubectl get pods --all-namespaces`
4. `k3d cluster delete CLUSTER_NAME` to delete the default cluster

@ -101,11 +98,7 @@ This repository is based on [@zeerorg](https://github.com/zeerorg/)'s [zeerorg/k

## Related Projects

- [k3x](https://github.com/inercia/k3x): GUI (Linux) to k3d
- [vscode-k3d](https://github.com/inercia/vscode-k3d): vscode plugin for k3d
- [AbsaOSS/k3d-action](https://github.com/AbsaOSS/k3d-action): fully customizable GitHub Action to run lightweight Kubernetes clusters.
- [AutoK3s](https://github.com/cnrancher/autok3s): a lightweight tool to help run K3s everywhere including k3d provider.
- [nolar/setup-k3d-k3s](https://github.com/nolar/setup-k3d-k3s): setup K3d/K3s for GitHub Actions.
- [k3x](https://github.com/inercia/k3x): a graphics interface (for Linux) to k3d.

## Contributing

@ -113,8 +106,6 @@ k3d is a community-driven project and so we welcome contributions of any form, b

Please read our [**Contributing Guidelines**](./CONTRIBUTING.md) and the related [**Code of Conduct**](./CODE_OF_CONDUCT.md).

You can find an overview of the k3d project (e.g. explanations and a repository guide) in the documentation: [k3d.io/internals/project](https://k3d.io/internals/project)

[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg)](code_of_conduct.md)

## Contributors ✨
@ -139,9 +130,6 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
<td align="center"><a href="http://inerciatech.com/"><img src="https://avatars2.githubusercontent.com/u/1841612?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Alvaro</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=inercia" title="Code">💻</a> <a href="#ideas-inercia" title="Ideas, Planning, & Feedback">🤔</a> <a href="#plugin-inercia" title="Plugin/utility libraries">🔌</a></td>
<td align="center"><a href="http://wsl.dev"><img src="https://avatars2.githubusercontent.com/u/905874?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Nuno do Carmo</b></sub></a><br /><a href="#content-nunix" title="Content">🖋</a> <a href="#tutorial-nunix" title="Tutorials">✅</a> <a href="#question-nunix" title="Answering Questions">💬</a></td>
<td align="center"><a href="https://github.com/erwinkersten"><img src="https://avatars0.githubusercontent.com/u/4391121?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Erwin Kersten</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=erwinkersten" title="Documentation">📖</a></td>
<td align="center"><a href="http://www.alexsears.com"><img src="https://avatars.githubusercontent.com/u/3712883?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Alex Sears</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=searsaw" title="Documentation">📖</a></td>
<td align="center"><a href="http://shanduur.github.io"><img src="https://avatars.githubusercontent.com/u/32583062?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Mateusz Urbanek</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=Shanduur" title="Code">💻</a></td>
<td align="center"><a href="https://github.com/benjaminjb"><img src="https://avatars.githubusercontent.com/u/4651855?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Benjamin Blattberg</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=benjaminjb" title="Code">💻</a></td>
</tr>
</table>
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -22,8 +22,7 @@ THE SOFTWARE.
package cluster

import (
l "github.com/rancher/k3d/v5/pkg/logger"

log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@ -37,19 +36,18 @@ func NewCmdCluster() *cobra.Command {
Long: `Manage cluster(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}

// add subcommands
cmd.AddCommand(NewCmdClusterCreate(),
NewCmdClusterStart(),
NewCmdClusterStop(),
NewCmdClusterDelete(),
NewCmdClusterList(),
NewCmdClusterEdit())
cmd.AddCommand(NewCmdClusterCreate())
cmd.AddCommand(NewCmdClusterStart())
cmd.AddCommand(NewCmdClusterStop())
cmd.AddCommand(NewCmdClusterDelete())
cmd.AddCommand(NewCmdClusterList())

// add flags

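For context on the cobra pattern shown in this hunk, the sketch below mirrors the shape of `NewCmdCluster`: a parent command that only prints its own help text and registers all subcommands in one variadic `AddCommand` call. This is a hedged, minimal illustration; the `newCreateCmd`/`newDeleteCmd` constructors are placeholders, not the real k3d constructors.

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

// newClusterCmd mirrors the structure of NewCmdCluster above: the parent
// command has no behavior of its own and delegates to its subcommands.
func newClusterCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "Manage cluster(s)",
		Run: func(cmd *cobra.Command, args []string) {
			// No own behavior: print the help text instead.
			if err := cmd.Help(); err != nil {
				log.Fatalln(err)
			}
		},
	}

	// Register all subcommands with a single variadic AddCommand call,
	// as the newer variant in the diff does, instead of one call per command.
	cmd.AddCommand(newCreateCmd(), newDeleteCmd())
	return cmd
}

// Placeholder subcommand constructors so the example is self-contained.
func newCreateCmd() *cobra.Command {
	return &cobra.Command{Use: "create", Run: func(cmd *cobra.Command, args []string) {}}
}

func newDeleteCmd() *cobra.Command {
	return &cobra.Command{Use: "delete", Run: func(cmd *cobra.Command, args []string) {}}
}

func main() {
	if err := newClusterCmd().Execute(); err != nil {
		log.Fatalln(err)
	}
}
```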
@ -1,6 +1,6 @@
/*

Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -26,30 +26,20 @@ import (
"fmt"
"os"
"runtime"
"strconv"
"strings"
"time"

"github.com/docker/go-connections/nat"
"github.com/sirupsen/logrus"

"github.com/spf13/cobra"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"

cliutil "github.com/rancher/k3d/v5/cmd/util"
cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
k3dCluster "github.com/rancher/k3d/v5/pkg/client"
"github.com/rancher/k3d/v5/pkg/config"
conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/version"
cliutil "github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
k3dCluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"

log "github.com/sirupsen/logrus"
)

var configFile string

const clusterCreateDescription = `
Create a new k3s cluster with containerized nodes (k3s in docker).
Every cluster will consist of one or more containers:
@ -58,294 +48,117 @@ Every cluster will consist of one or more containers:
- (optionally) 1 (or more) agent node containers (k3s)
`

/*
* Viper for configuration handling
* we use two different instances of Viper here to handle
* - cfgViper: "static" configuration
* - ppViper: "pre-processed" configuration, where CLI input has to be pre-processed
* to be treated as part of the SImpleConfig
*/
var (
cfgViper = viper.New()
ppViper = viper.New()
)

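To make the two-Viper comment above concrete, here is a rough, self-contained sketch of that split: one instance backed by the config file and directly bound flags, and a second instance that only collects "pre-processed" CLI/environment input under a `K3D_` prefix. The helper name `loadConfig` and the wiring are illustrative assumptions, not the actual k3d helpers.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

var (
	cfgViper = viper.New() // "static" configuration: config file + directly bound flags
	ppViper  = viper.New() // "pre-processed" configuration: raw CLI/env input, massaged later
)

// loadConfig is an illustrative stand-in for the real config initialization.
func loadConfig(flags *pflag.FlagSet, configFile string) error {
	// Environment variables like K3D_... feed the pre-processed instance.
	ppViper.SetEnvPrefix("K3D")
	ppViper.AutomaticEnv()

	// CLI-only flags (e.g. --api-port) are bound under a "cli." prefix and
	// translated into the config structure in a later step.
	if f := flags.Lookup("api-port"); f != nil {
		_ = ppViper.BindPFlag("cli.api-port", f)
	}

	// The static instance reads the (optional) config file.
	if configFile != "" {
		cfgViper.SetConfigFile(configFile)
		if err := cfgViper.ReadInConfig(); err != nil {
			return fmt.Errorf("failed to read config file: %w", err)
		}
	}
	return nil
}

func main() {
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
	flags.String("api-port", "", "api port in [HOST:]PORT form")
	_ = flags.Parse([]string{"--api-port", "0.0.0.0:6550"})

	if err := loadConfig(flags, ""); err != nil {
		panic(err)
	}
	fmt.Println("cli.api-port =", ppViper.GetString("cli.api-port"))
}
```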
func initConfig() error {

// Viper for pre-processed config options
ppViper.SetEnvPrefix("K3D")

if l.Log().GetLevel() >= logrus.DebugLevel {

c, _ := yaml.Marshal(ppViper.AllSettings())
l.Log().Debugf("Additional CLI Configuration:\n%s", c)
}

return cliconfig.InitViperWithConfigFile(cfgViper, configFile)
}

// NewCmdClusterCreate returns a new cobra command
func NewCmdClusterCreate() *cobra.Command {

createClusterOpts := &k3d.ClusterCreateOpts{}
var noRollback bool
var updateDefaultKubeconfig, updateCurrentContext bool

// create new command
cmd := &cobra.Command{
Use: "create NAME",
Short: "Create a new cluster",
Long: clusterCreateDescription,
Args: cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
PreRunE: func(cmd *cobra.Command, args []string) error {
return initConfig()
},
Run: func(cmd *cobra.Command, args []string) {

/*************************
* Compute Configuration *
*************************/
if cfgViper.GetString("apiversion") == "" {
cfgViper.Set("apiversion", config.DefaultConfigApiVersion)
}
if cfgViper.GetString("kind") == "" {
cfgViper.Set("kind", "Simple")
}
cfg, err := config.FromViper(cfgViper)
if err != nil {
l.Log().Fatalln(err)
}

if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
l.Log().Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
if err != nil {
l.Log().Fatalln(err)
}
}

simpleCfg := cfg.(conf.SimpleConfig)

l.Log().Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)

simpleCfg, err = applyCLIOverrides(simpleCfg)
if err != nil {
l.Log().Fatalf("Failed to apply CLI overrides: %+v", err)
}

l.Log().Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)

/**************************************
* Transform, Process & Validate Configuration *
**************************************/

// Set the name
if len(args) != 0 {
simpleCfg.Name = args[0]
}

clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, simpleCfg)
if err != nil {
l.Log().Fatalln(err)
}
l.Log().Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)

clusterConfig, err = config.ProcessClusterConfig(*clusterConfig)
if err != nil {
l.Log().Fatalln(err)
}
l.Log().Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)

if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
l.Log().Fatalln("Failed Cluster Configuration Validation: ", err)
}

/**************************************
* Create cluster if it doesn't exist *
**************************************/
// parse args and flags
cluster := parseCreateClusterCmd(cmd, args, createClusterOpts)

// check if a cluster with that name exists already
if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster); err == nil {
l.Log().Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, cluster); err == nil {
log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", cluster.Name)
}

if !updateDefaultKubeconfig && updateCurrentContext {
log.Infoln("--update-default-kubeconfig=false --> sets --switch-context=false")
updateCurrentContext = false
}

// create cluster
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
l.Log().Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
clusterConfig.ClusterCreateOpts.WaitForServer = true
if updateDefaultKubeconfig {
log.Debugln("'--update-default-kubeconfig set: enabling wait-for-server")
cluster.CreateClusterOpts.WaitForServer = true
}
//if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil {
// rollback if creation failed
l.Log().Errorln(err)
if simpleCfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
l.Log().Fatalln("Cluster creation FAILED, rollback deactivated.")
if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
log.Errorln(err)
if noRollback {
log.Fatalln("Cluster creation FAILED, rollback deactivated.")
}
// rollback if creation failed
l.Log().Errorln("Failed to create cluster >>> Rolling Back")
if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, k3d.ClusterDeleteOpts{SkipRegistryCheck: true}); err != nil {
l.Log().Errorln(err)
l.Log().Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
log.Errorln("Failed to create cluster >>> Rolling Back")
if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
log.Errorln(err)
log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
}
l.Log().Fatalln("Cluster creation FAILED, all changes have been rolled back!")
log.Fatalln("Cluster creation FAILED, all changes have been rolled back!")
}
l.Log().Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
log.Infof("Cluster '%s' created successfully!", cluster.Name)

/**************
* Kubeconfig *
**************/

if !clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
l.Log().Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
clusterConfig.KubeconfigOpts.SwitchCurrentContext = false
}

if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
l.Log().Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: simpleCfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
l.Log().Warningln(err)
if updateDefaultKubeconfig {
log.Debugf("Updating default kubeconfig with a new context for cluster %s", cluster.Name)
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
log.Warningln(err)
}
}

/*****************
* User Feedback *
*****************/

// print information on how to use the cluster with kubectl
l.Log().Infoln("You can now use it like this:")
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, clusterConfig.Cluster.Name))
} else if !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
log.Infoln("You can now use it like this:")
if updateDefaultKubeconfig && !updateCurrentContext {
fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name))
} else if !updateCurrentContext {
if runtime.GOOS == "windows" {
fmt.Printf("$env:KUBECONFIG=(%s kubeconfig write %s)\n", os.Args[0], clusterConfig.Cluster.Name)
fmt.Printf("$env:KUBECONFIG=(%s kubeconfig write %s)\n", os.Args[0], cluster.Name)
} else {
fmt.Printf("export KUBECONFIG=$(%s kubeconfig write %s)\n", os.Args[0], clusterConfig.Cluster.Name)
fmt.Printf("export KUBECONFIG=$(%s kubeconfig write %s)\n", os.Args[0], cluster.Name)
}
}
fmt.Println("kubectl cluster-info")
},
}

/***************
* Config File *
***************/

cmd.Flags().StringVarP(&configFile, "config", "c", "", "Path of a config file to use")
if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
}

/***********************
* Pre-Processed Flags *
***********************
*
* Flags that have a different style in the CLI than their internal representation.
* Also, we cannot set (viper) default values just here for those.
* Example:
* CLI: `--api-port 0.0.0.0:6443`
* Config File:
* exposeAPI:
* hostIP: 0.0.0.0
* port: 6443
*
* Note: here we also use Slice-type flags instead of Array because of https://github.com/spf13/viper/issues/380
*/

cmd.Flags().String("api-port", "", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
_ = ppViper.BindPFlag("cli.api-port", cmd.Flags().Lookup("api-port"))

cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com@server:0\" -e \"SOME_KEY=SOME_VAL@server:0\"`")
_ = ppViper.BindPFlag("cli.env", cmd.Flags().Lookup("env"))

cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent:0,1 -v /tmp/test:/tmp/other@server:0`")
_ = ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume"))

cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent:1`")
_ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port"))

cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent:0,1\" --k3s-node-label \"other.label=somevalue@server:0\"`")
_ = ppViper.BindPFlag("cli.k3s-node-labels", cmd.Flags().Lookup("k3s-node-label"))

cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent:0,1\" --runtime-label \"other.label=somevalue@server:0\"`")
_ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label"))

cmd.Flags().String("registry-create", "", "Create a k3d-managed registry and connect it to the cluster (Format: `NAME[:HOST][:HOSTPORT]`\n - Example: `k3d cluster create --registry-create mycluster-registry:0.0.0.0:5432`")
_ = ppViper.BindPFlag("cli.registries.create", cmd.Flags().Lookup("registry-create"))

/* k3s */
cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server:0\"")
_ = ppViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg"))

/******************
* "Normal" Flags *
******************
*
* No pre-processing needed on CLI level.
* Bound to Viper config value.
* Default Values set via Viper.
*/

cmd.Flags().IntP("servers", "s", 0, "Specify how many servers you want to create")
_ = cfgViper.BindPFlag("servers", cmd.Flags().Lookup("servers"))
cfgViper.SetDefault("servers", 1)

/*********
* Flags *
*********/
cmd.Flags().String("api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
cmd.Flags().IntP("servers", "s", 1, "Specify how many servers you want to create")
cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
_ = cfgViper.BindPFlag("agents", cmd.Flags().Lookup("agents"))
cfgViper.SetDefault("agents", 0)

cmd.Flags().StringP("image", "i", "", "Specify k3s image that you want to use for the nodes")
_ = cfgViper.BindPFlag("image", cmd.Flags().Lookup("image"))
cfgViper.SetDefault("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)))

cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
cmd.Flags().String("network", "", "Join an existing network")
_ = cfgViper.BindPFlag("network", cmd.Flags().Lookup("network"))

cmd.Flags().String("subnet", "", "[Experimental: IPAM] Define a subnet for the newly created container network (Example: `172.28.0.0/16`)")
_ = cfgViper.BindPFlag("subnet", cmd.Flags().Lookup("subnet"))

cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
_ = cfgViper.BindPFlag("token", cmd.Flags().Lookup("token"))

cmd.Flags().Bool("wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
_ = cfgViper.BindPFlag("options.k3d.wait", cmd.Flags().Lookup("wait"))

cmd.Flags().Duration("timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
_ = cfgViper.BindPFlag("options.k3d.timeout", cmd.Flags().Lookup("timeout"))

cmd.Flags().Bool("kubeconfig-update-default", true, "Directly update the default kubeconfig with the new cluster's context")
_ = cfgViper.BindPFlag("options.kubeconfig.updatedefaultkubeconfig", cmd.Flags().Lookup("kubeconfig-update-default"))

cmd.Flags().Bool("kubeconfig-switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default)")
_ = cfgViper.BindPFlag("options.kubeconfig.switchcurrentcontext", cmd.Flags().Lookup("kubeconfig-switch-context"))

cmd.Flags().Bool("no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
_ = cfgViper.BindPFlag("options.k3d.disableloadbalancer", cmd.Flags().Lookup("no-lb"))

cmd.Flags().Bool("no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
_ = cfgViper.BindPFlag("options.k3d.disablerollback", cmd.Flags().Lookup("no-rollback"))

cmd.Flags().String("gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
_ = cfgViper.BindPFlag("options.runtime.gpurequest", cmd.Flags().Lookup("gpus"))

cmd.Flags().String("servers-memory", "", "Memory limit imposed on the server nodes [From docker]")
_ = cfgViper.BindPFlag("options.runtime.serversmemory", cmd.Flags().Lookup("servers-memory"))

cmd.Flags().String("agents-memory", "", "Memory limit imposed on the agents nodes [From docker]")
_ = cfgViper.BindPFlag("options.runtime.agentsmemory", cmd.Flags().Lookup("agents-memory"))
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v \"/my/path@agent[0,1]\" -v \"/tmp/test:/tmp/other@server[0]\"`")
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p \"8080:80@agent[0]\" -p \"8081@agent[1]\"`")
cmd.Flags().StringArrayP("label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -l \"other.label=somevalue@server[0]\"`")
cmd.Flags().BoolVar(&createClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
cmd.Flags().DurationVar(&createClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --update-default-kubeconfig)")
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
cmd.Flags().BoolVar(&noRollback, "no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
cmd.Flags().BoolVar(&createClusterOpts.PrepDisableHostIPInjection, "no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
cmd.Flags().StringVar(&createClusterOpts.GPURequest, "gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com\" -e \"SOME_KEY=SOME_VAL@server[0]\"`")

/* Image Importing */
cmd.Flags().Bool("no-image-volume", false, "Disable the creation of a volume for importing images")
_ = cfgViper.BindPFlag("options.k3d.disableimagevolume", cmd.Flags().Lookup("no-image-volume"))
cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")

/* Registry */
cmd.Flags().StringArray("registry-use", nil, "Connect to one or more k3d-managed registries running locally")
_ = cfgViper.BindPFlag("registries.use", cmd.Flags().Lookup("registry-use"))
/* Multi Server Configuration */

cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file")
_ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config"))
if err := cmd.MarkFlagFilename("registry-config", "yaml", "yml"); err != nil {
l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
}
// multi-server - datastore
// TODO: implement multi-server setups with external data store
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi server clusters)")
/*
cmd.Flags().String("datastore-network", "", "Specify container network where we can find the datastore-endpoint (add a connection)")

/* Loadbalancer / Proxy */
cmd.Flags().StringSlice("lb-config-override", nil, "Use dotted YAML path syntax to override nginx loadbalancer settings")
_ = cfgViper.BindPFlag("options.k3d.loadbalancer.configoverrides", cmd.Flags().Lookup("lb-config-override"))
// TODO: set default paths and hint, that one should simply mount the files using --volume flag
cmd.Flags().String("datastore-cafile", "", "Specify external datastore's TLS Certificate Authority (CA) file")
cmd.Flags().String("datastore-certfile", "", "Specify external datastore's TLS certificate file'")
cmd.Flags().String("datastore-keyfile", "", "Specify external datastore's TLS key file'")
*/

/* k3s */
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")

/* Subcommands */

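The "normal" flags in the hunk above all follow the same three-layer pattern: `SetDefault` provides the baseline, a config file value overrides it, and an explicitly set CLI flag wins over both. A minimal sketch of that precedence, assuming nothing beyond cobra and viper themselves (the in-memory YAML stands in for a real config file):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	v := viper.New()

	cmd := &cobra.Command{Use: "create", Run: func(cmd *cobra.Command, args []string) {}}
	cmd.Flags().IntP("servers", "s", 0, "Specify how many servers you want to create")

	// Bind the flag to a viper key and put the real default on viper,
	// like the servers/agents/image flags registered above.
	_ = v.BindPFlag("servers", cmd.Flags().Lookup("servers"))
	v.SetDefault("servers", 1)

	fmt.Println(v.GetInt("servers")) // 1 -> viper default (flag not changed)

	// A config file value sits above the default ...
	v.SetConfigType("yaml")
	_ = v.ReadConfig(bytes.NewBufferString("servers: 2"))
	fmt.Println(v.GetInt("servers")) // 2 -> config file wins over the default

	// ... and an explicitly set CLI flag wins over both.
	_ = cmd.Flags().Set("servers", "3")
	fmt.Println(v.GetInt("servers")) // 3 -> a changed flag has the highest precedence here
}
```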
@ -353,73 +166,115 @@ func NewCmdClusterCreate() *cobra.Command {
return cmd
}

func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
// parseCreateClusterCmd parses the command input into variables required to create a cluster
func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.ClusterCreateOpts) *k3d.Cluster {

/********************************
* Parse and validate arguments *
********************************/

clustername := k3d.DefaultClusterName
if len(args) != 0 {
clustername = args[0]
}
if err := cluster.CheckName(clustername); err != nil {
log.Fatal(err)
}

/****************************
* Parse and validate flags *
****************************/

// -> API-PORT
// --image
image, err := cmd.Flags().GetString("image")
if err != nil {
log.Errorln("No image specified")
log.Fatalln(err)
}
if image == "latest" {
image = version.GetK3sVersion(true)
}

// --servers
serverCount, err := cmd.Flags().GetInt("servers")
if err != nil {
log.Fatalln(err)
}

// --agents
agentCount, err := cmd.Flags().GetInt("agents")
if err != nil {
log.Fatalln(err)
}

// --network
networkName, err := cmd.Flags().GetString("network")
if err != nil {
log.Fatalln(err)
}
network := k3d.ClusterNetwork{}
if networkName != "" {
network.Name = networkName
network.External = true
}
if networkName == "host" && (serverCount+agentCount) > 1 {
log.Fatalln("Can only run a single node in hostnetwork mode")
}

// --token
token, err := cmd.Flags().GetString("token")
if err != nil {
log.Fatalln(err)
}

// --timeout
if cmd.Flags().Changed("timeout") && createClusterOpts.Timeout <= 0*time.Second {
log.Fatalln("--timeout DURATION must be >= 1s")
}

// --api-port
apiPort, err := cmd.Flags().GetString("api-port")
if err != nil {
log.Fatalln(err)
}

// parse the port mapping
var (
err error
exposeAPI *k3d.ExposureOpts
)

// Apply config file values as defaults
exposeAPI = &k3d.ExposureOpts{
PortMapping: nat.PortMapping{
Binding: nat.PortBinding{
HostIP: cfg.ExposeAPI.HostIP,
HostPort: cfg.ExposeAPI.HostPort,
},
},
Host: cfg.ExposeAPI.Host,
exposeAPI, err := cliutil.ParseAPIPort(apiPort)
if err != nil {
log.Fatalln(err)
}
if exposeAPI.Host == "" {
exposeAPI.Host = k3d.DefaultAPIHost
}
if exposeAPI.HostIP == "" {
exposeAPI.HostIP = k3d.DefaultAPIHost
}
if networkName == "host" {
// in hostNetwork mode, we're not going to map a hostport. Here it should always use 6443.
// Note that hostNetwork mode is super inflexible and since we don't change the backend port (on the container), it will only be one hostmode cluster allowed.
exposeAPI.Port = k3d.DefaultAPIPort
}

// Overwrite if cli arg is set
if ppViper.IsSet("cli.api-port") {
if cfg.ExposeAPI.HostPort != "" {
l.Log().Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
}
exposeAPI, err = cliutil.ParsePortExposureSpec(ppViper.GetString("cli.api-port"), k3d.DefaultAPIPort)
if err != nil {
return cfg, fmt.Errorf("failed to parse API Port spec: %w", err)
}
// --volume
volumeFlags, err := cmd.Flags().GetStringArray("volume")
if err != nil {
log.Fatalln(err)
}

// Set to random port if port is empty string
if len(exposeAPI.Binding.HostPort) == 0 {
var freePort string
port, err := cliutil.GetFreePort()
freePort = strconv.Itoa(port)
if err != nil || port == 0 {
l.Log().Warnf("Failed to get random free port: %+v", err)
l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", k3d.DefaultAPIPort)
freePort = k3d.DefaultAPIPort
}
exposeAPI.Binding.HostPort = freePort
}

cfg.ExposeAPI = conf.SimpleExposureOpts{
Host: exposeAPI.Host,
HostIP: exposeAPI.Binding.HostIP,
HostPort: exposeAPI.Binding.HostPort,
}

// -> VOLUMES
// volumeFilterMap will map volume mounts to applied node filters
volumeFilterMap := make(map[string][]string, 1)
for _, volumeFlag := range ppViper.GetStringSlice("cli.volumes") {
for _, volumeFlag := range volumeFlags {

// split node filter from the specified volume
volume, filters, err := cliutil.SplitFiltersFromFlag(volumeFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}

if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create != nil || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
l.Log().Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
// validate the specified volume mount and return it in SRC:DEST format
volume, err = cliutil.ValidateVolumeMount(runtimes.SelectedRuntime, volume)
if err != nil {
log.Fatalln(err)
}

// create new entry or append filter to existing entry
@ -430,108 +285,81 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
}
}

for volume, nodeFilters := range volumeFilterMap {
cfg.Volumes = append(cfg.Volumes, conf.VolumeWithNodeFilters{
Volume: volume,
NodeFilters: nodeFilters,
})
// --port
portFlags, err := cmd.Flags().GetStringArray("port")
if err != nil {
log.Fatalln(err)
}

l.Log().Tracef("VolumeFilterMap: %+v", volumeFilterMap)

// -> PORTS
portFilterMap := make(map[string][]string, 1)
for _, portFlag := range ppViper.GetStringSlice("cli.ports") {
for _, portFlag := range portFlags {
// split node filter from the specified volume
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}

if len(filters) > 1 {
log.Fatalln("Can only apply a Portmap to one node")
}

// the same portmapping can't be applied to multiple nodes

// validate the specified volume mount and return it in SRC:DEST format
portmap, err = cliutil.ValidatePortMap(portmap)
if err != nil {
log.Fatalln(err)
}

// create new entry or append filter to existing entry
if _, exists := portFilterMap[portmap]; exists {
l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
log.Fatalln("Same Portmapping can not be used for multiple nodes")
} else {
portFilterMap[portmap] = filters
}
}

for port, nodeFilters := range portFilterMap {
cfg.Ports = append(cfg.Ports, conf.PortWithNodeFilters{
Port: port,
NodeFilters: nodeFilters,
})
log.Tracef("PortFilterMap: %+v", portFilterMap)

// --label
labelFlags, err := cmd.Flags().GetStringArray("label")
if err != nil {
log.Fatalln(err)
}

l.Log().Tracef("PortFilterMap: %+v", portFilterMap)

// --k3s-node-label
// k3sNodeLabelFilterMap will add k3s node label to applied node filters
k3sNodeLabelFilterMap := make(map[string][]string, 1)
for _, labelFlag := range ppViper.GetStringSlice("cli.k3s-node-labels") {
// labelFilterMap will add container label to applied node filters
labelFilterMap := make(map[string][]string, 1)
for _, labelFlag := range labelFlags {

// split node filter from the specified label
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
label, filters, err := cliutil.SplitFiltersFromFlag(labelFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}

// create new entry or append filter to existing entry
if _, exists := k3sNodeLabelFilterMap[label]; exists {
k3sNodeLabelFilterMap[label] = append(k3sNodeLabelFilterMap[label], nodeFilters...)
if _, exists := labelFilterMap[label]; exists {
labelFilterMap[label] = append(labelFilterMap[label], filters...)
} else {
k3sNodeLabelFilterMap[label] = nodeFilters
labelFilterMap[label] = filters
}
}

for label, nodeFilters := range k3sNodeLabelFilterMap {
cfg.Options.K3sOptions.NodeLabels = append(cfg.Options.K3sOptions.NodeLabels, conf.LabelWithNodeFilters{
Label: label,
NodeFilters: nodeFilters,
})
}

l.Log().Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)

// --runtime-label
// runtimeLabelFilterMap will add container runtime label to applied node filters
runtimeLabelFilterMap := make(map[string][]string, 1)
for _, labelFlag := range ppViper.GetStringSlice("cli.runtime-labels") {

// split node filter from the specified label
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
if err != nil {
l.Log().Fatalln(err)
}

cliutil.ValidateRuntimeLabelKey(strings.Split(label, "=")[0])

// create new entry or append filter to existing entry
if _, exists := runtimeLabelFilterMap[label]; exists {
runtimeLabelFilterMap[label] = append(runtimeLabelFilterMap[label], nodeFilters...)
} else {
runtimeLabelFilterMap[label] = nodeFilters
}
}

for label, nodeFilters := range runtimeLabelFilterMap {
cfg.Options.Runtime.Labels = append(cfg.Options.Runtime.Labels, conf.LabelWithNodeFilters{
Label: label,
NodeFilters: nodeFilters,
})
}

l.Log().Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
log.Tracef("LabelFilterMap: %+v", labelFilterMap)

// --env
envFlags, err := cmd.Flags().GetStringArray("env")
if err != nil {
log.Fatalln(err)
}

// envFilterMap will add container env vars to applied node filters
envFilterMap := make(map[string][]string, 1)
for _, envFlag := range ppViper.GetStringSlice("cli.env") {
for _, envFlag := range envFlags {

// split node filter from the specified env var
env, filters, err := cliutil.SplitFiltersFromFlag(envFlag)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}

// create new entry or append filter to existing entry
@ -542,58 +370,134 @@ func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
}
}

for envVar, nodeFilters := range envFilterMap {
cfg.Env = append(cfg.Env, conf.EnvVarWithNodeFilters{
EnvVar: envVar,
NodeFilters: nodeFilters,
})
log.Tracef("EnvFilterMap: %+v", envFilterMap)

/********************
* *
* generate cluster *
* *
********************/

cluster := &k3d.Cluster{
Name: clustername,
Network: network,
Token: token,
CreateClusterOpts: createClusterOpts,
ExposeAPI: exposeAPI,
}

l.Log().Tracef("EnvFilterMap: %+v", envFilterMap)
// generate list of nodes
cluster.Nodes = []*k3d.Node{}

// --k3s-arg
argFilterMap := make(map[string][]string, 1)
for _, argFlag := range ppViper.GetStringSlice("cli.k3sargs") {
// ServerLoadBalancer
if !createClusterOpts.DisableLoadBalancer {
cluster.ServerLoadBalancer = &k3d.Node{
Role: k3d.LoadBalancerRole,
}
}

// split node filter from the specified arg
arg, filters, err := cliutil.SplitFiltersFromFlag(argFlag)
/****************
* Server Nodes *
****************/

for i := 0; i < serverCount; i++ {
node := k3d.Node{
Role: k3d.ServerRole,
Image: image,
Args: createClusterOpts.K3sServerArgs,
ServerOpts: k3d.ServerOpts{},
}

// TODO: by default, we don't expose an API port: should we change that?
// -> if we want to change that, simply add the exposeAPI struct here

// first server node will be init node if we have more than one server specified but no external datastore
if i == 0 && serverCount > 1 {
node.ServerOpts.IsInit = true
cluster.InitNode = &node
}

// append node to list
cluster.Nodes = append(cluster.Nodes, &node)
}

/****************
* Agent Nodes *
****************/

for i := 0; i < agentCount; i++ {
node := k3d.Node{
Role: k3d.AgentRole,
Image: image,
Args: createClusterOpts.K3sAgentArgs,
}

cluster.Nodes = append(cluster.Nodes, &node)
}

// append volumes
for volume, filters := range volumeFilterMap {
nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}

// create new entry or append filter to existing entry
if _, exists := argFilterMap[arg]; exists {
argFilterMap[arg] = append(argFilterMap[arg], filters...)
} else {
argFilterMap[arg] = filters
for _, node := range nodes {
node.Volumes = append(node.Volumes, volume)
}
}

for arg, nodeFilters := range argFilterMap {
cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, conf.K3sArgWithNodeFilters{
Arg: arg,
NodeFilters: nodeFilters,
})
// append ports
nodeCount := serverCount + agentCount
nodeList := cluster.Nodes
if !createClusterOpts.DisableLoadBalancer {
nodeCount++
nodeList = append(nodeList, cluster.ServerLoadBalancer)
}
for portmap, filters := range portFilterMap {
if len(filters) == 0 && (nodeCount) > 1 {
log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node (including the loadbalancer, if there is any).", portmap)
}
nodes, err := cliutil.FilterNodes(nodeList, filters)
if err != nil {
log.Fatalln(err)
}
for _, node := range nodes {
node.Ports = append(node.Ports, portmap)
}
}

// --registry-create
if ppViper.IsSet("cli.registries.create") {
flagvalue := ppViper.GetString("cli.registries.create")
fvSplit := strings.SplitN(flagvalue, ":", 2)
if cfg.Registries.Create == nil {
cfg.Registries.Create = &conf.SimpleConfigRegistryCreateConfig{}
// append labels
for label, filters := range labelFilterMap {
nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
if err != nil {
log.Fatalln(err)
}
cfg.Registries.Create.Name = fvSplit[0]
if len(fvSplit) > 1 {
exposeAPI, err = cliutil.ParsePortExposureSpec(fvSplit[1], "1234") // internal port is unused after all
if err != nil {
return cfg, fmt.Errorf("failed to registry port spec: %w", err)
for _, node := range nodes {
// ensure node.Labels map is initialized (see also ClusterCreate.nodeSetup)
if node.Labels == nil {
node.Labels = make(map[string]string)
}
cfg.Registries.Create.Host = exposeAPI.Host
cfg.Registries.Create.HostPort = exposeAPI.Binding.HostPort
}

labelKey, labelValue := cliutil.SplitKV(label)
node.Labels[labelKey] = labelValue
}
}

return cfg, nil
// append env vars
for env, filters := range envFilterMap {
nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
if err != nil {
log.Fatalln(err)
}
for _, node := range nodes {
node.Env = append(node.Env, env)
}
}

/**********************
* Utility Containers *
**********************/
// ...

return cluster
}
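Both the old `parseCreateClusterCmd` and the new `applyCLIOverrides` above lean on the same idea: each repeatable flag value carries an optional `@NODEFILTER[;NODEFILTER...]` suffix, and values are accumulated in a `map[value][]filters` before being applied to the matching nodes. The sketch below shows that splitting and accumulation in isolation; `splitFiltersFromFlag` here is an illustrative stand-in, not the real `cliutil.SplitFiltersFromFlag`.

```go
package main

import (
	"fmt"
	"strings"
)

// splitFiltersFromFlag splits "VALUE@FILTER1;FILTER2" into the value and its
// node filters. A value without "@" simply has no filters.
func splitFiltersFromFlag(flag string) (string, []string) {
	parts := strings.SplitN(flag, "@", 2)
	if len(parts) == 1 {
		return parts[0], nil
	}
	return parts[0], strings.Split(parts[1], ";")
}

func main() {
	volumeFlags := []string{
		"/my/path@agent:0,1",
		"/tmp/test:/tmp/other@server:0",
		"/my/path@server:0", // same volume again: its filters are appended, not replaced
	}

	// volumeFilterMap maps each volume mount to the node filters it applies to,
	// mirroring volumeFilterMap/portFilterMap/envFilterMap in the diff above.
	volumeFilterMap := make(map[string][]string, 1)
	for _, volumeFlag := range volumeFlags {
		volume, filters := splitFiltersFromFlag(volumeFlag)
		if _, exists := volumeFilterMap[volume]; exists {
			volumeFilterMap[volume] = append(volumeFilterMap[volume], filters...)
		} else {
			volumeFilterMap[volume] = filters
		}
	}

	fmt.Printf("%+v\n", volumeFilterMap)
	// map[/my/path:[agent:0,1 server:0] /tmp/test:/tmp/other:[server:0]]
}
```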
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -26,21 +26,16 @@ import (
"os"
"path"

"github.com/rancher/k3d/v5/cmd/util"
cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3dutil "github.com/rancher/k3d/v5/pkg/util"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3dutil "github.com/rancher/k3d/v3/pkg/util"
log "github.com/sirupsen/logrus"

"github.com/spf13/cobra"
"github.com/spf13/viper"
)

var clusterDeleteConfigFile string
var clusterDeleteCfgViper = viper.New()

// NewCmdClusterDelete returns a new cobra command
func NewCmdClusterDelete() *cobra.Command {

@ -52,38 +47,35 @@ func NewCmdClusterDelete() *cobra.Command {
Long: `Delete cluster(s).`,
Args: cobra.MinimumNArgs(0), // 0 or n arguments; 0 = default cluster name
ValidArgsFunction: util.ValidArgsAvailableClusters,
PreRunE: func(cmd *cobra.Command, args []string) error {
return cliconfig.InitViperWithConfigFile(clusterDeleteCfgViper, clusterDeleteConfigFile)
},
Run: func(cmd *cobra.Command, args []string) {
clusters := parseDeleteClusterCmd(cmd, args)

if len(clusters) == 0 {
l.Log().Infoln("No clusters found")
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := client.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c, k3d.ClusterDeleteOpts{SkipRegistryCheck: false}); err != nil {
l.Log().Fatalln(err)
if err := cluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
log.Fatalln(err)
}
l.Log().Infoln("Removing cluster details from default kubeconfig...")
if err := client.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
l.Log().Warnln("Failed to remove cluster details from default kubeconfig")
l.Log().Warnln(err)
log.Infoln("Removing cluster details from default kubeconfig...")
if err := cluster.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
log.Warnln("Failed to remove cluster details from default kubeconfig")
log.Warnln(err)
}
l.Log().Infoln("Removing standalone kubeconfig file (if there is one)...")
log.Infoln("Removing standalone kubeconfig file (if there is one)...")
configDir, err := k3dutil.GetConfigDirOrCreate()
if err != nil {
l.Log().Warnf("Failed to delete kubeconfig file: %+v", err)
log.Warnf("Failed to delete kubeconfig file: %+v", err)
} else {
kubeconfigfile := path.Join(configDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
if err := os.Remove(kubeconfigfile); err != nil {
if !os.IsNotExist(err) {
l.Log().Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
log.Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
}
}
}

l.Log().Infof("Successfully deleted cluster %s!", c.Name)
log.Infof("Successfully deleted cluster %s!", c.Name)
}
}

@ -95,15 +87,6 @@ func NewCmdClusterDelete() *cobra.Command {
// add flags
cmd.Flags().BoolP("all", "a", false, "Delete all existing clusters")

/***************
* Config File *
***************/

cmd.Flags().StringVarP(&clusterDeleteConfigFile, "config", "c", "", "Path of a config file to use")
if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
}

// done
return cmd
}
@ -111,57 +94,31 @@ func NewCmdClusterDelete() *cobra.Command {
// parseDeleteClusterCmd parses the command input into variables required to delete clusters
func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {

// --all
var clusters []*k3d.Cluster

// --all
all, err := cmd.Flags().GetBool("all")
if err != nil {
l.Log().Fatalln(err)
}

// --config
if clusterDeleteConfigFile != "" {
// not allowed with --all or more args
if len(args) > 0 || all {
l.Log().Fatalln("failed to delete cluster: cannot use `--config` flag with additional arguments or `--all`")
}

if clusterDeleteCfgViper.GetString("name") == "" {
l.Log().Fatalln("failed to delete cluster via config file: no name in config file")
}

c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterDeleteCfgViper.GetString("name")})
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalf("failed to delete cluster '%s': %v", clusterDeleteCfgViper.GetString("name"), err)
}

clusters = append(clusters, c)
return clusters
}

// --all was set
if all {
l.Log().Infoln("Deleting all clusters...")
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
return clusters
}

// args only
clusternames := []string{k3d.DefaultClusterName}
if len(args) != 0 {
clusternames = args
}

for _, name := range clusternames {
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
c, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
if err == client.ClusterGetNoNodesFoundError {
if err == cluster.ClusterGetNoNodesFoundError {
continue
}
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, c)
}
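The target-selection logic in `parseDeleteClusterCmd` (both variants) boils down to three sources for the cluster list: `--all`, explicit names from the arguments, or the default cluster name. A hedged sketch of just that resolution step, detached from the runtime/client calls; `defaultClusterName` and `listAllClusters` are placeholders, not the real k3d constants or API:

```go
package main

import "fmt"

const defaultClusterName = "k3s-default" // placeholder for k3d.DefaultClusterName

// listAllClusters stands in for client.ClusterList against the selected runtime.
func listAllClusters() []string {
	return []string{"k3s-default", "dev", "test"}
}

// resolveTargets mirrors parseDeleteClusterCmd: --all wins, then explicit args,
// otherwise fall back to the default cluster name.
func resolveTargets(all bool, args []string) []string {
	if all {
		return listAllClusters()
	}
	if len(args) != 0 {
		return args
	}
	return []string{defaultClusterName}
}

func main() {
	fmt.Println(resolveTargets(true, nil))              // [k3s-default dev test]
	fmt.Println(resolveTargets(false, []string{"dev"})) // [dev]
	fmt.Println(resolveTargets(false, nil))             // [k3s-default]
}
```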
@ -1,124 +0,0 @@
/*
Copyright © 2020-2021 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster

import (
"github.com/rancher/k3d/v5/cmd/util"
cliutil "github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/spf13/cobra"
)

// NewCmdClusterEdit returns a new cobra command
func NewCmdClusterEdit() *cobra.Command {

// create new cobra command
cmd := &cobra.Command{
Use: "edit CLUSTER",
Short: "[EXPERIMENTAL] Edit cluster(s).",
Long: `[EXPERIMENTAL] Edit cluster(s).`,
Args: cobra.ExactArgs(1),
Aliases: []string{"update"},
ValidArgsFunction: util.ValidArgsAvailableClusters,
Run: func(cmd *cobra.Command, args []string) {

existingCluster, changeset := parseEditClusterCmd(cmd, args)

l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)

if err := client.ClusterEditChangesetSimple(cmd.Context(), runtimes.SelectedRuntime, existingCluster, changeset); err != nil {
l.Log().Fatalf("Failed to update the cluster: %v", err)
}

l.Log().Infof("Successfully updated %s", existingCluster.Name)

},
}

// add subcommands

// add flags
cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`")

// done
return cmd
}

// parseEditClusterCmd parses the command input into variables required to delete nodes
func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf.SimpleConfig) {

existingCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: args[0]})
if err != nil {
l.Log().Fatalln(err)
}

if existingCluster == nil {
l.Log().Infof("Cluster %s not found", args[0])
return nil, nil
}

changeset := conf.SimpleConfig{}

/*
* --port-add
*/
portFlags, err := cmd.Flags().GetStringArray("port-add")
|
||||
if err != nil {
|
||||
l.Log().Errorln(err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// init portmap
|
||||
changeset.Ports = []conf.PortWithNodeFilters{}
|
||||
|
||||
portFilterMap := make(map[string][]string, 1)
|
||||
for _, portFlag := range portFlags {
|
||||
|
||||
// split node filter from the specified volume
|
||||
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := portFilterMap[portmap]; exists {
|
||||
l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
|
||||
} else {
|
||||
portFilterMap[portmap] = filters
|
||||
}
|
||||
}
|
||||
|
||||
for port, nodeFilters := range portFilterMap {
|
||||
changeset.Ports = append(changeset.Ports, conf.PortWithNodeFilters{
|
||||
Port: port,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
l.Log().Tracef("PortFilterMap: %+v", portFilterMap)
|
||||
|
||||
return existingCluster, &changeset
|
||||
}
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -28,14 +28,15 @@ import (
"os"
"strings"

"github.com/rancher/k3d/v5/cmd/util"
k3cluster "github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
k3cluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"

log "github.com/sirupsen/logrus"

"github.com/liggitt/tabwriter"
)

@ -82,14 +83,14 @@ func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {
// cluster name not specified : get all clusters
clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
} else {
for _, clusterName := range args {
// cluster name specified : get specific cluster
retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
@ -125,7 +126,7 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
}
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
if err != nil {
l.Log().Fatalln("Failed to print headers")
log.Fatalln("Failed to print headers")
}
}
}
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -24,22 +24,21 @@ package cluster
import (
"time"

"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"

l "github.com/rancher/k3d/v5/pkg/logger"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"

log "github.com/sirupsen/logrus"
)

// NewCmdClusterStart returns a new cobra command
func NewCmdClusterStart() *cobra.Command {

startClusterOpts := types.ClusterStartOpts{
Intent: k3d.IntentClusterStart,
}
startClusterOpts := types.ClusterStartOpts{}

// create new command
cmd := &cobra.Command{
@ -50,18 +49,12 @@ func NewCmdClusterStart() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
clusters := parseStartClusterCmd(cmd, args)
if len(clusters) == 0 {
l.Log().Infoln("No clusters found")
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
envInfo, err := client.GatherEnvironmentInfo(cmd.Context(), runtimes.SelectedRuntime, c)
if err != nil {
l.Log().Fatalf("failed to gather info about cluster environment: %v", err)
if err := cluster.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
log.Fatalln(err)
}
startClusterOpts.EnvironmentInfo = envInfo
if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
l.Log().Fatalln(err)
}
l.Log().Infof("Started cluster '%s'", c.Name)
}
}
},
@ -69,7 +62,7 @@ func NewCmdClusterStart() *cobra.Command {

// add flags
cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
cmd.Flags().BoolVar(&startClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) (and loadbalancer) to be ready before returning.")
cmd.Flags().BoolVar(&startClusterOpts.WaitForServer, "wait", false, "Wait for the server(s) (and loadbalancer) to be ready before returning.")
cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")

// add subcommands
@ -84,11 +77,11 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
var clusters []*k3d.Cluster

if all, err := cmd.Flags().GetBool("all"); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
} else if all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
return clusters
}
@ -99,9 +92,9 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
}

for _, name := range clusternames {
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, cluster)
}
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -24,11 +24,12 @@ package cluster
import (
"github.com/spf13/cobra"

"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"

log "github.com/sirupsen/logrus"
)

// NewCmdClusterStop returns a new cobra command
@ -43,11 +44,11 @@ func NewCmdClusterStop() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
clusters := parseStopClusterCmd(cmd, args)
if len(clusters) == 0 {
l.Log().Infoln("No clusters found")
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := client.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
l.Log().Fatalln(err)
if err := cluster.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
log.Fatalln(err)
}
}
}
@ -55,7 +56,7 @@ func NewCmdClusterStop() *cobra.Command {
}

// add flags
cmd.Flags().BoolP("all", "a", false, "Stop all existing clusters")
cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")

// add subcommands

@ -69,11 +70,11 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
var clusters []*k3d.Cluster

if all, err := cmd.Flags().GetBool("all"); err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
} else if all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
return clusters
}
@ -84,9 +85,9 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
}

for _, name := range clusternames {
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, cluster)
}
@ -1,47 +0,0 @@
/*
Copyright © 2020-2021 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config

import (
l "github.com/rancher/k3d/v5/pkg/logger"

"github.com/spf13/cobra"
)

// NewCmdConfig returns a new cobra command
func NewCmdConfig() *cobra.Command {
cmd := &cobra.Command{
Use: "config",
Short: "Work with config file(s)",
Long: `Work with config file(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
}
},
}

cmd.AddCommand(NewCmdConfigInit(), NewCmdConfigMigrate())

return cmd
}
@ -1,77 +0,0 @@
/*
Copyright © 2020-2021 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config

import (
"fmt"
"os"

config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/spf13/cobra"
)

// NewCmdConfigInit returns a new cobra command
func NewCmdConfigInit() *cobra.Command {
var output string
var force bool

cmd := &cobra.Command{
Use: "init",
Aliases: []string{"create"},
Run: func(cmd *cobra.Command, args []string) {
l.Log().Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
if output == "-" {
fmt.Println(config.DefaultConfig)
} else {
// check if file exists
var file *os.File
var err error
_, err = os.Stat(output)
if os.IsNotExist(err) || force {
// create/overwrite file
file, err = os.Create(output)
if err != nil {
l.Log().Fatalf("Failed to create/overwrite output file: %s", err)
}
// write content
if _, err = file.WriteString(config.DefaultConfig); err != nil {
l.Log().Fatalf("Failed to write to output file: %+v", err)
}
} else if err != nil {
l.Log().Fatalf("Failed to stat output file: %+v", err)
} else {
l.Log().Errorln("Output file exists and --force was not set")
os.Exit(1)
}
}
},
}

cmd.Flags().StringVarP(&output, "output", "o", "k3d-default.yaml", "Write a default k3d config")
if err := cmd.MarkFlagFilename("output", "yaml", "yml"); err != nil {
l.Log().Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
}
cmd.Flags().BoolVarP(&force, "force", "f", false, "Force overwrite of target file")

return cmd
}
@ -1,112 +0,0 @@
/*
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config

import (
"os"
"strings"

"github.com/rancher/k3d/v5/pkg/config"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)

// NewCmdConfigMigrate returns a new cobra command
func NewCmdConfigMigrate() *cobra.Command {

cmd := &cobra.Command{
Use: "migrate INPUT [OUTPUT]",
Aliases: []string{"update"},
Args: cobra.RangeArgs(1, 2),
Run: func(cmd *cobra.Command, args []string) {

configFile := args[0]

if _, err := os.Stat(configFile); err != nil {
l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
}

cfgViper := viper.New()
cfgViper.SetConfigType("yaml")

cfgViper.SetConfigFile(configFile)

// try to read config into memory (viper map structure)
if err := cfgViper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
}
// config file found but some other error happened
l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
}

schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
if err != nil {
l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
}

if err := config.ValidateSchemaFile(configFile, schema); err != nil {
l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
}

l.Log().Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))

cfg, err := config.FromViper(cfgViper)
if err != nil {
l.Log().Fatalln(err)
}

if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
if err != nil {
l.Log().Fatalln(err)
}
}

yamlout, err := yaml.Marshal(cfg)
if err != nil {
l.Log().Fatalln(err)
}

output := "-"

if len(args) > 1 {
output = args[1]
}

if output == "-" {
if _, err := os.Stdout.Write(yamlout); err != nil {
l.Log().Fatalln(err)
}
} else {
if err := os.WriteFile(output, yamlout, os.ModePerm); err != nil {
l.Log().Fatalln(err)
}
}

},
}

return cmd
}
@ -1,41 +0,0 @@
/*
Copyright © 2020-2021 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config

import (
"fmt"

"github.com/spf13/cobra"
"github.com/spf13/viper"
)

// NewCmdConfig returns a new cobra command
func NewCmdConfigView() *cobra.Command {
cmd := &cobra.Command{
Use: "view",
Aliases: []string{"show"},
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("%+v", viper.AllSettings())
},
}
return cmd
}
@ -1,93 +0,0 @@
/*
Copyright © 2020-2021 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package debug

import (
"fmt"

"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/types"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
)

// NewCmdDebug returns a new cobra command
func NewCmdDebug() *cobra.Command {
cmd := &cobra.Command{
Use: "debug",
Hidden: true,
Short: "Debug k3d cluster(s)",
Long: `Debug k3d cluster(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
}
},
}

cmd.AddCommand(NewCmdDebugLoadbalancer())

return cmd
}

func NewCmdDebugLoadbalancer() *cobra.Command {
cmd := &cobra.Command{
Use: "loadbalancer",
Aliases: []string{"lb"},
Short: "Debug the loadbalancer",
Long: `Debug the loadbalancer`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
}
},
}

cmd.AddCommand(&cobra.Command{
Use: "get-config CLUSTERNAME",
Args: cobra.ExactArgs(1), // cluster name
ValidArgsFunction: util.ValidArgsAvailableClusters,
Run: func(cmd *cobra.Command, args []string) {
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]})
if err != nil {
l.Log().Fatalln(err)
}

lbconf, err := client.GetLoadbalancerConfig(cmd.Context(), runtimes.SelectedRuntime, c)
if err != nil {
l.Log().Fatalln(err)
}
yamlized, err := yaml.Marshal(lbconf)
if err != nil {
l.Log().Fatalln(err)
}
fmt.Println(string(yamlized))
},
})

return cmd
}
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -22,7 +22,7 @@ THE SOFTWARE.
package image

import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@ -31,14 +31,13 @@ func NewCmdImage() *cobra.Command {

// create new cobra command
cmd := &cobra.Command{
Use: "image",
Aliases: []string{"images"},
Short: "Handle container images.",
Long: `Handle container images.`,
Use: "image",
Short: "Handle container images.",
Long: `Handle container images.`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -26,11 +26,12 @@ import (

"github.com/spf13/cobra"

"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/tools"
k3d "github.com/rancher/k3d/v3/pkg/types"

log "github.com/sirupsen/logrus"
)

// NewCmdImageImport returns a new cobra command
@ -40,39 +41,27 @@ func NewCmdImageImport() *cobra.Command {

// create new command
cmd := &cobra.Command{
Use: "import [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]]",
Short: "Import image(s) from docker into k3d cluster(s).",
Long: `Import image(s) from docker into k3d cluster(s).

If an IMAGE starts with the prefix 'docker.io/', then this prefix is stripped internally.
That is, 'docker.io/rancher/k3d-tools:latest' is treated as 'rancher/k3d-tools:latest'.

If an IMAGE starts with the prefix 'library/' (or 'docker.io/library/'), then this prefix is stripped internally.
That is, 'library/busybox:latest' (or 'docker.io/library/busybox:latest') are treated as 'busybox:latest'.

If an IMAGE does not have a version tag, then ':latest' is assumed.
That is, 'rancher/k3d-tools' is treated as 'rancher/k3d-tools:latest'.

A file ARCHIVE always takes precedence.
So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of the IMAGE of the same name.`,
Aliases: []string{"load"},
Use: "import [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]]",
Short: "Import image(s) from docker into k3d cluster(s).",
Long: `Import image(s) from docker into k3d cluster(s).`,
Aliases: []string{"images"},
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
images, clusters := parseLoadImageCmd(cmd, args)
l.Log().Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
log.Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
errOccured := false
for _, cluster := range clusters {
l.Log().Infof("Importing image(s) into cluster '%s'", cluster.Name)
if err := client.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
l.Log().Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
log.Infof("Importing image(s) into cluster '%s'", cluster.Name)
if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
log.Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
errOccured = true
}
}
if errOccured {
l.Log().Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
log.Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
os.Exit(1)
}
l.Log().Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
log.Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
},
}

@ -81,11 +70,10 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
*********/
cmd.Flags().StringArrayP("cluster", "c", []string{k3d.DefaultClusterName}, "Select clusters to load the image to.")
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}

cmd.Flags().BoolVarP(&loadImageOpts.KeepTar, "keep-tarball", "k", false, "Do not delete the tarball containing the saved images from the shared volume")
cmd.Flags().BoolVarP(&loadImageOpts.KeepToolsNode, "keep-tools", "t", false, "Do not delete the tools node after import")

/* Subcommands */

@ -99,7 +87,7 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
// --cluster
clusterNames, err := cmd.Flags().GetStringArray("cluster")
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters := []k3d.Cluster{}
for _, clusterName := range clusterNames {
@ -109,7 +97,7 @@ func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Clust
// images
images := args
if len(images) == 0 {
l.Log().Fatalln("No images specified!")
log.Fatalln("No images specified!")
}

return images, clusters
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -22,7 +22,7 @@ THE SOFTWARE.
package kubeconfig

import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@ -36,14 +36,15 @@ func NewCmdKubeconfig() *cobra.Command {
Long: `Manage kubeconfig(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}

// add subcommands
cmd.AddCommand(NewCmdKubeconfigGet(), NewCmdKubeconfigMerge())
cmd.AddCommand(NewCmdKubeconfigGet())
cmd.AddCommand(NewCmdKubeconfigMerge())

// add flags

@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -25,12 +25,13 @@ import (
"fmt"
"os"

"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/spf13/cobra"

log "github.com/sirupsen/logrus"
)

type getKubeconfigFlags struct {
@ -40,7 +41,7 @@ type getKubeconfigFlags struct {
// NewCmdKubeconfigGet returns a new cobra command
func NewCmdKubeconfigGet() *cobra.Command {

writeKubeConfigOptions := client.WriteKubeConfigOptions{
writeKubeConfigOptions := cluster.WriteKubeConfigOptions{
UpdateExisting: true,
UpdateCurrentContext: true,
OverwriteExisting: true,
@ -67,15 +68,15 @@ func NewCmdKubeconfigGet() *cobra.Command {

// generate list of clusters
if getKubeconfigFlags.all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
} else {
for _, clusterName := range args {
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
@ -84,10 +85,10 @@ func NewCmdKubeconfigGet() *cobra.Command {
// get kubeconfigs from all clusters
errorGettingKubeconfig := false
for _, c := range clusters {
l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
fmt.Println("---") // YAML document separator
if _, err := client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
l.Log().Errorln(err)
if _, err := cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
log.Errorln(err)
errorGettingKubeconfig = true
}
}
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -27,14 +27,15 @@ import (
"path"
"strings"

"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
k3dutil "github.com/rancher/k3d/v5/pkg/util"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3dutil "github.com/rancher/k3d/v3/pkg/util"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"

log "github.com/sirupsen/logrus"
)

type mergeKubeconfigFlags struct {
@ -46,7 +47,7 @@ type mergeKubeconfigFlags struct {
// NewCmdKubeconfigMerge returns a new cobra command
func NewCmdKubeconfigMerge() *cobra.Command {

writeKubeConfigOptions := client.WriteKubeConfigOptions{}
writeKubeConfigOptions := cluster.WriteKubeConfigOptions{}

mergeKubeconfigFlags := mergeKubeconfigFlags{}

@ -63,14 +64,14 @@ func NewCmdKubeconfigMerge() *cobra.Command {
var err error

if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
l.Log().Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
log.Fatalln("Cannot use both '--output' and '--merge-default-kubeconfig' at the same time")
}

// generate list of clusters
if mergeKubeconfigFlags.all {
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
} else {

@ -80,9 +81,9 @@ func NewCmdKubeconfigMerge() *cobra.Command {
}

for _, clusterName := range clusternames {
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
clusters = append(clusters, retrievedCluster)
}
@ -93,18 +94,18 @@ func NewCmdKubeconfigMerge() *cobra.Command {
var outputs []string
outputDir, err := k3dutil.GetConfigDirOrCreate()
if err != nil {
l.Log().Errorln(err)
l.Log().Fatalln("Failed to save kubeconfig to local directory")
log.Errorln(err)
log.Fatalln("Failed to save kubeconfig to local directory")
}
for _, c := range clusters {
l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
output := mergeKubeconfigFlags.output
if output == "" && !mergeKubeconfigFlags.targetDefault {
output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
}
output, err = client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
output, err = cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
if err != nil {
l.Log().Errorln(err)
log.Errorln(err)
errorGettingKubeconfig = true
} else {
outputs = append(outputs, output)
@ -126,11 +127,11 @@ func NewCmdKubeconfigMerge() *cobra.Command {
// add flags
cmd.Flags().StringVarP(&mergeKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
if err := cmd.MarkFlagFilename("output"); err != nil {
l.Log().Fatalln("Failed to mark flag --output as filename")
log.Fatalln("Failed to mark flag --output as filename")
}
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "kubeconfig-merge-default", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "merge-default-kubeconfig", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "kubeconfig-switch-context", "s", true, "Switch to new context")
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "switch-context", "s", true, "Switch to new context")
cmd.Flags().BoolVar(&writeKubeConfigOptions.OverwriteExisting, "overwrite", false, "[Careful!] Overwrite existing file, ignoring its contents")
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -22,7 +22,7 @@ THE SOFTWARE.
package node

import (
l "github.com/rancher/k3d/v5/pkg/logger"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@ -36,19 +36,18 @@ func NewCmdNode() *cobra.Command {
Long: `Manage node(s)`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
l.Log().Errorln("Couldn't get help text")
l.Log().Fatalln(err)
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}

// add subcommands
cmd.AddCommand(NewCmdNodeCreate(),
NewCmdNodeStart(),
NewCmdNodeStop(),
NewCmdNodeDelete(),
NewCmdNodeList(),
NewCmdNodeEdit())
cmd.AddCommand(NewCmdNodeCreate())
cmd.AddCommand(NewCmdNodeStart())
cmd.AddCommand(NewCmdNodeStop())
cmd.AddCommand(NewCmdNodeDelete())
cmd.AddCommand(NewCmdNodeList())

// add flags

@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -23,19 +23,16 @@ package node

import (
"fmt"
"strings"
"time"

"github.com/spf13/cobra"

dockerunits "github.com/docker/go-units"
"github.com/rancher/k3d/v5/cmd/util"
cliutil "github.com/rancher/k3d/v5/cmd/util"
k3dc "github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/version"
"github.com/rancher/k3d/v3/cmd/util"
k3dc "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"
log "github.com/sirupsen/logrus"
)

// NewCmdNodeCreate returns a new cobra command
@ -50,19 +47,11 @@ func NewCmdNodeCreate() *cobra.Command {
Long: `Create a new containerized k3s node (k3s in docker).`,
Args: cobra.ExactArgs(1), // exactly one name accepted // TODO: if not specified, inherit from cluster that the node shall belong to, if that is specified
Run: func(cmd *cobra.Command, args []string) {
nodes, clusterName := parseCreateNodeCmd(cmd, args)
if strings.HasPrefix(clusterName, "https://") {
l.Log().Infof("Adding %d node(s) to the remote cluster '%s'...", len(nodes), clusterName)
if err := k3dc.NodeAddToClusterMultiRemote(cmd.Context(), runtimes.SelectedRuntime, nodes, clusterName, createNodeOpts); err != nil {
l.Log().Fatalf("failed to add %d node(s) to the remote cluster '%s': %v", len(nodes), clusterName, err)
}
} else {
l.Log().Infof("Adding %d node(s) to the runtime local cluster '%s'...", len(nodes), clusterName)
if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, &k3d.Cluster{Name: clusterName}, createNodeOpts); err != nil {
l.Log().Fatalf("failed to add %d node(s) to the runtime local cluster '%s': %v", len(nodes), clusterName, err)
}
nodes, cluster := parseCreateNodeCmd(cmd, args)
if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
log.Errorln(err)
}
l.Log().Infof("Successfully created %d node(s)!", len(nodes))
},
}

@ -70,131 +59,73 @@ func NewCmdNodeCreate() *cobra.Command {
cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
l.Log().Fatalln("Failed to register flag completion for '--role'", err)
log.Fatalln("Failed to register flag completion for '--role'", err)
}
cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Cluster URL or k3d cluster name to connect to.")
cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Select the cluster that the node shall connect to.")
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}

cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
cmd.Flags().String("memory", "", "Memory limit imposed on the node [From docker]")

cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", true, "Wait for the node(s) to be ready before returning.")
cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for the node(s) to be ready before returning.")
cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")

cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"")
cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"")

cmd.Flags().StringSliceP("network", "n", []string{}, "Add node to (another) runtime network")

cmd.Flags().StringVarP(&createNodeOpts.ClusterToken, "token", "t", "", "Override cluster token (required when connecting to an external cluster)")

// done
return cmd
}

// parseCreateNodeCmd parses the command input into variables required to create a node
func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, string) {
// parseCreateNodeCmd parses the command input into variables required to create a cluster
func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, *k3d.Cluster) {

// --replicas
replicas, err := cmd.Flags().GetInt("replicas")
if err != nil {
l.Log().Errorln("No replica count specified")
l.Log().Fatalln(err)
log.Errorln("No replica count specified")
log.Fatalln(err)
}

// --role
roleStr, err := cmd.Flags().GetString("role")
if err != nil {
l.Log().Errorln("No node role specified")
l.Log().Fatalln(err)
log.Errorln("No node role specified")
log.Fatalln(err)
}
if _, ok := k3d.NodeRoles[roleStr]; !ok {
l.Log().Fatalf("Unknown node role '%s'\n", roleStr)
log.Fatalf("Unknown node role '%s'\n", roleStr)
}
role := k3d.NodeRoles[roleStr]

// --image
image, err := cmd.Flags().GetString("image")
if err != nil {
l.Log().Errorln("No image specified")
l.Log().Fatalln(err)
log.Errorln("No image specified")
log.Fatalln(err)
}

// --cluster
clusterName, err := cmd.Flags().GetString("cluster")
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}

// --memory
memory, err := cmd.Flags().GetString("memory")
if err != nil {
l.Log().Errorln("No memory specified")
l.Log().Fatalln(err)
}
if _, err := dockerunits.RAMInBytes(memory); memory != "" && err != nil {
l.Log().Errorf("Provided memory limit value is invalid")
}

// --runtime-label
runtimeLabelsFlag, err := cmd.Flags().GetStringSlice("runtime-label")
if err != nil {
l.Log().Errorln("No runtime-label specified")
l.Log().Fatalln(err)
}

runtimeLabels := make(map[string]string, len(runtimeLabelsFlag)+1)
for _, label := range runtimeLabelsFlag {
labelSplitted := strings.Split(label, "=")
if len(labelSplitted) != 2 {
l.Log().Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
}
cliutil.ValidateRuntimeLabelKey(labelSplitted[0])
runtimeLabels[labelSplitted[0]] = labelSplitted[1]
}

// Internal k3d runtime labels take precedence over user-defined labels
runtimeLabels[k3d.LabelRole] = roleStr

// --k3s-node-label
k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label")
if err != nil {
l.Log().Errorln("No k3s-node-label specified")
l.Log().Fatalln(err)
}

k3sNodeLabels := make(map[string]string, len(k3sNodeLabelsFlag))
for _, label := range k3sNodeLabelsFlag {
labelSplitted := strings.Split(label, "=")
if len(labelSplitted) != 2 {
l.Log().Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
}
k3sNodeLabels[labelSplitted[0]] = labelSplitted[1]
}

// --network
networks, err := cmd.Flags().GetStringSlice("network")
if err != nil {
l.Log().Fatalf("failed to get --network string slice flag: %v", err)
cluster := &k3d.Cluster{
Name: clusterName,
}

// generate list of nodes
nodes := []*k3d.Node{}
for i := 0; i < replicas; i++ {
node := &k3d.Node{
Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i),
Role: role,
Image: image,
K3sNodeLabels: k3sNodeLabels,
RuntimeLabels: runtimeLabels,
Restart: true,
Memory: memory,
Networks: networks,
Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i),
Role: role,
Image: image,
Labels: map[string]string{
k3d.LabelRole: roleStr,
},
Restart: true,
}
nodes = append(nodes, node)
}

return nodes, clusterName
return nodes, cluster
}
@ -1,5 +1,5 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -22,44 +22,36 @@ THE SOFTWARE.
package node

import (
"github.com/rancher/k3d/v5/cmd/util"
"github.com/rancher/k3d/v5/pkg/client"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

type nodeDeleteFlags struct {
All bool
IncludeRegistries bool
}

// NewCmdNodeDelete returns a new cobra command
func NewCmdNodeDelete() *cobra.Command {

flags := nodeDeleteFlags{}

// create new cobra command
cmd := &cobra.Command{
Use: "delete (NAME | --all)",
Short: "Delete node(s).",
Long: `Delete node(s).`,
Args: cobra.MinimumNArgs(1), // at least one node has to be specified
ValidArgsFunction: util.ValidArgsAvailableNodes,
Run: func(cmd *cobra.Command, args []string) {

nodes := parseDeleteNodeCmd(cmd, args, &flags)
nodeDeleteOpts := k3d.NodeDeleteOpts{SkipLBUpdate: flags.All} // do not update LB, if we're deleting all nodes anyway
nodes := parseDeleteNodeCmd(cmd, args)

if len(nodes) == 0 {
l.Log().Infoln("No nodes found")
log.Infoln("No nodes found")
} else {
for _, node := range nodes {
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, nodeDeleteOpts); err != nil {
l.Log().Fatalln(err)
if err := cluster.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
log.Fatalln(err)
}
}
l.Log().Infof("Successfully deleted %d node(s)!", len(nodes))
}
},
}
@ -67,45 +59,36 @@ func NewCmdNodeDelete() *cobra.Command {
// add subcommands

// add flags
cmd.Flags().BoolVarP(&flags.All, "all", "a", false, "Delete all existing nodes")
cmd.Flags().BoolVarP(&flags.IncludeRegistries, "registries", "r", false, "Also delete registries")
cmd.Flags().BoolP("all", "a", false, "Delete all existing clusters")

// done
return cmd
}

// parseDeleteNodeCmd parses the command input into variables required to delete nodes
func parseDeleteNodeCmd(cmd *cobra.Command, args []string, flags *nodeDeleteFlags) []*k3d.Node {

var nodes []*k3d.Node
var err error
func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {

// --all
if flags.All {
if !flags.IncludeRegistries {
l.Log().Infoln("Didn't set '--registries', so won't delete registries.")
}
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
var nodes []*k3d.Node

if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
nodes, err = cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
include := k3d.ClusterInternalNodeRoles
exclude := []k3d.Role{}
if flags.IncludeRegistries {
include = append(include, k3d.RegistryRole)
}
nodes = client.NodeFilterByRoles(nodes, include, exclude)
return nodes
}

if !flags.All && len(args) < 1 {
l.Log().Fatalln("Expecting at least one node name if `--all` is not set")
if len(args) < 1 {
log.Fatalln("Expecting at least one node name if `--all` is not set")
}

for _, name := range args {
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
node, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
if err != nil {
l.Log().Fatalln(err)
log.Fatalln(err)
}
nodes = append(nodes, node)
}
|
@ -1,113 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdNodeEdit returns a new cobra command
|
||||
func NewCmdNodeEdit() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "edit NODE",
|
||||
Short: "[EXPERIMENTAL] Edit node(s).",
|
||||
Long: `[EXPERIMENTAL] Edit node(s).`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
Aliases: []string{"update"},
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
existingNode, changeset := parseEditNodeCmd(cmd, args)
|
||||
|
||||
l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)
|
||||
|
||||
if err := client.NodeEdit(cmd.Context(), runtimes.SelectedRuntime, existingNode, changeset); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
l.Log().Infof("Successfully updated %s", existingNode.Name)
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
|
||||
// add flags
|
||||
cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] (serverlb only!) Map ports from the node container to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseEditNodeCmd parses the command input into variables required to delete nodes
|
||||
func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node) {
|
||||
|
||||
existingNode, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: args[0]})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
if existingNode == nil {
|
||||
l.Log().Infof("Node %s not found", args[0])
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if existingNode.Role != k3d.LoadBalancerRole {
|
||||
l.Log().Fatalln("Currently only the loadbalancer can be updated!")
|
||||
}
|
||||
|
||||
changeset := &k3d.Node{}
|
||||
|
||||
/*
|
||||
* --port-add
|
||||
*/
|
||||
portFlags, err := cmd.Flags().GetStringArray("port-add")
|
||||
if err != nil {
|
||||
l.Log().Errorln(err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// init portmap
|
||||
changeset.Ports = nat.PortMap{}
|
||||
|
||||
for _, flag := range portFlags {
|
||||
|
||||
portmappings, err := nat.ParsePortSpec(flag)
|
||||
if err != nil {
|
||||
l.Log().Fatalf("Failed to parse port spec '%s': %+v", flag, err)
|
||||
}
|
||||
|
||||
for _, pm := range portmappings {
|
||||
changeset.Ports[pm.Port] = append(changeset.Ports[pm.Port], pm.Binding)
|
||||
}
|
||||
}
|
||||
|
||||
return existingNode, changeset
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -23,81 +23,101 @@ package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/rancher/k3d/v3/cmd/util"
|
||||
"github.com/rancher/k3d/v3/pkg/cluster"
|
||||
"github.com/rancher/k3d/v3/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v3/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type nodeListFlags struct {
|
||||
noHeader bool
|
||||
output string
|
||||
}
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewCmdNodeList returns a new cobra command
|
||||
func NewCmdNodeList() *cobra.Command {
|
||||
nodeListFlags := nodeListFlags{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "list [NODE [NODE...]]",
|
||||
Use: "list [NAME [NAME...]]",
|
||||
Aliases: []string{"ls", "get"},
|
||||
Short: "List node(s)",
|
||||
Long: `List node(s).`,
|
||||
Args: cobra.MinimumNArgs(0), // 0 or more; 0 = all
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
nodes := []*k3d.Node{}
|
||||
for _, name := range args {
|
||||
nodes = append(nodes, &k3d.Node{
|
||||
Name: name,
|
||||
})
|
||||
}
|
||||
|
||||
nodes, headersOff := parseGetNodeCmd(cmd, args)
|
||||
var existingNodes []*k3d.Node
|
||||
if len(nodes) == 0 { // Option a) no name specified -> get all nodes
|
||||
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
found, err := cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
log.Fatalln(err)
|
||||
}
|
||||
existingNodes = append(existingNodes, found...)
|
||||
} else { // Option b) cluster name specified -> get specific cluster
|
||||
for _, node := range nodes {
|
||||
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
|
||||
found, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
log.Fatalln(err)
|
||||
}
|
||||
existingNodes = append(existingNodes, found)
|
||||
}
|
||||
}
|
||||
|
||||
// print existing nodes
|
||||
headers := &[]string{}
|
||||
if !nodeListFlags.noHeader {
|
||||
headers = &[]string{"NAME", "ROLE", "CLUSTER", "STATUS"}
|
||||
}
|
||||
|
||||
util.PrintNodes(existingNodes, nodeListFlags.output,
|
||||
headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) {
|
||||
fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n",
|
||||
strings.TrimPrefix(node.Name, "/"),
|
||||
string(node.Role),
|
||||
node.RuntimeLabels[k3d.LabelClusterName],
|
||||
node.State.Status)
|
||||
}))
|
||||
// print existing clusters
|
||||
printNodes(existingNodes, headersOff)
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolVar(&nodeListFlags.noHeader, "no-headers", false, "Disable headers")
|
||||
cmd.Flags().StringVarP(&nodeListFlags.output, "output", "o", "", "Output format. One of: json|yaml")
|
||||
cmd.Flags().Bool("no-headers", false, "Disable headers")
|
||||
|
||||
// add subcommands
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
func parseGetNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, bool) {
|
||||
// --no-headers
|
||||
headersOff, err := cmd.Flags().GetBool("no-headers")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// Args = node name
|
||||
if len(args) == 0 {
|
||||
return nil, headersOff
|
||||
}
|
||||
|
||||
nodes := []*k3d.Node{}
|
||||
for _, name := range args {
|
||||
nodes = append(nodes, &k3d.Node{Name: name})
|
||||
}
|
||||
|
||||
return nodes, headersOff
|
||||
}
|
||||
|
||||
func printNodes(nodes []*k3d.Node, headersOff bool) {
|
||||
|
||||
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
|
||||
defer tabwriter.Flush()
|
||||
|
||||
if !headersOff {
|
||||
headers := []string{"NAME", "ROLE", "CLUSTER", "STATUS"}
|
||||
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to print headers")
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
return nodes[i].Name < nodes[j].Name
|
||||
})
|
||||
|
||||
for _, node := range nodes {
|
||||
fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), string(node.Role), node.Labels[k3d.LabelClusterName], node.State.Status)
|
||||
}
|
||||
}
|
||||
|
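Both sides of this diff render the node table through a tabwriter with tab-separated columns and a deferred Flush. A compact, self-contained sketch of that output path, using the standard library's text/tabwriter instead of the liggitt fork and made-up node values:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

type node struct{ Name, Role, Cluster, Status string }

func main() {
	// made-up nodes, only to show the column layout
	nodes := []node{
		{"/k3d-demo-server-0", "server", "demo", "running"},
		{"/k3d-demo-agent-0", "agent", "demo", "running"},
	}

	// same minwidth/tabwidth/padding as above, minus the RememberWidths flag
	tw := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', 0)
	defer tw.Flush()

	fmt.Fprintf(tw, "%s\n", strings.Join([]string{"NAME", "ROLE", "CLUSTER", "STATUS"}, "\t"))
	for _, n := range nodes {
		// container names come back with a leading slash, hence the TrimPrefix
		fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", strings.TrimPrefix(n.Name, "/"), n.Role, n.Cluster, n.Status)
	}
}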
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -22,11 +22,12 @@ THE SOFTWARE.
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/rancher/k3d/v3/cmd/util"
|
||||
"github.com/rancher/k3d/v3/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v3/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewCmdNodeStart returns a new cobra command
|
||||
@ -34,14 +35,14 @@ func NewCmdNodeStart() *cobra.Command {
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "start NODE", // TODO: startNode: allow one or more names or --all
|
||||
Use: "start NAME", // TODO: startNode: allow one or more names or --all
|
||||
Short: "Start an existing k3d node",
|
||||
Long: `Start an existing k3d node.`,
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
node := parseStartNodeCmd(cmd, args)
|
||||
if err := runtimes.SelectedRuntime.StartNode(cmd.Context(), node); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
log.Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
@ -54,7 +55,7 @@ func NewCmdNodeStart() *cobra.Command {
|
||||
func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
|
||||
// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
|
||||
if len(args) == 0 || len(args[0]) == 0 {
|
||||
l.Log().Fatalln("No node name given")
|
||||
log.Fatalln("No node name given")
|
||||
}
|
||||
|
||||
return &k3d.Node{Name: args[0]}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -22,12 +22,13 @@ THE SOFTWARE.
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v3/cmd/util"
|
||||
"github.com/rancher/k3d/v3/pkg/runtimes"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
k3d "github.com/rancher/k3d/v3/pkg/types"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewCmdNodeStop returns a new cobra command
|
||||
@ -42,7 +43,7 @@ func NewCmdNodeStop() *cobra.Command {
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
node := parseStopNodeCmd(cmd, args)
|
||||
if err := runtimes.SelectedRuntime.StopNode(cmd.Context(), node); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
log.Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
@ -55,7 +56,7 @@ func NewCmdNodeStop() *cobra.Command {
|
||||
func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
|
||||
// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
|
||||
if len(args) == 0 || len(args[0]) == 0 {
|
||||
l.Log().Fatalln("No node name given")
|
||||
log.Fatalln("No node name given")
|
||||
}
|
||||
|
||||
return &k3d.Node{Name: args[0]}
|
||||
|
@ -1,57 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdRegistry returns a new cobra command
|
||||
func NewCmdRegistry() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "registry",
|
||||
Aliases: []string{"registries", "reg"},
|
||||
Short: "Manage registry/registries",
|
||||
Long: `Manage registry/registries`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
cmd.AddCommand(NewCmdRegistryCreate(),
|
||||
NewCmdRegistryStart(),
|
||||
NewCmdRegistryStop(),
|
||||
NewCmdRegistryDelete(),
|
||||
NewCmdRegistryList())
|
||||
|
||||
// add flags
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,139 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
|
||||
cliutil "github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type regCreatePreProcessedFlags struct {
|
||||
Port string
|
||||
Clusters []string
|
||||
}
|
||||
|
||||
type regCreateFlags struct {
|
||||
Image string
|
||||
NoHelp bool
|
||||
}
|
||||
|
||||
var helptext string = `# You can now use the registry like this (example):
# 1. create a new cluster that uses this registry
k3d cluster create --registry-use %s

# 2. tag an existing local image to be pushed to the registry
docker tag nginx:latest %s/mynginx:v0.1

# 3. push that image to the registry
docker push %s/mynginx:v0.1

# 4. run a pod that uses this image
kubectl run mynginx --image %s/mynginx:v0.1
`
|
||||
|
||||
// NewCmdRegistryCreate returns a new cobra command
|
||||
func NewCmdRegistryCreate() *cobra.Command {
|
||||
|
||||
flags := &regCreateFlags{}
ppFlags := &regCreatePreProcessedFlags{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "create NAME",
|
||||
Short: "Create a new registry",
|
||||
Long: `Create a new registry.`,
|
||||
Args: cobra.MaximumNArgs(1), // maximum one name accepted
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
reg, clusters := parseCreateRegistryCmd(cmd, args, flags, ppFlags)
|
||||
regNode, err := client.RegistryRun(cmd.Context(), runtimes.SelectedRuntime, reg)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
if err := client.RegistryConnectClusters(cmd.Context(), runtimes.SelectedRuntime, regNode, clusters); err != nil {
|
||||
l.Log().Errorln(err)
|
||||
}
|
||||
l.Log().Infof("Successfully created registry '%s'", reg.Host)
|
||||
regString := fmt.Sprintf("%s:%s", reg.Host, reg.ExposureOpts.Binding.HostPort)
|
||||
if !flags.NoHelp {
|
||||
fmt.Println(fmt.Sprintf(helptext, regString, regString, regString, regString))
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
|
||||
// TODO: connecting to clusters requires non-existing config reload functionality in containerd
|
||||
cmd.Flags().StringArrayVarP(&ppFlags.Clusters, "cluster", "c", nil, "[NotReady] Select the cluster(s) that the registry shall connect to.")
|
||||
if err := cmd.RegisterFlagCompletionFunc("cluster", cliutil.ValidArgsAvailableClusters); err != nil {
|
||||
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
|
||||
}
|
||||
if err := cmd.Flags().MarkHidden("cluster"); err != nil {
|
||||
l.Log().Fatalln("Failed to hide --cluster flag on registry create command")
|
||||
}
|
||||
|
||||
cmd.Flags().StringVarP(&flags.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultRegistryImageRepo, k3d.DefaultRegistryImageTag), "Specify image used for the registry")
|
||||
|
||||
cmd.Flags().StringVarP(&ppFlags.Port, "port", "p", "random", "Select which port the registry should be listening on on your machine (localhost) (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d registry create --port 0.0.0.0:5111`")
|
||||
|
||||
cmd.Flags().BoolVar(&flags.NoHelp, "no-help", false, "Disable the help text (How-To use the registry)")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseCreateRegistryCmd parses the command input into variables required to create a registry
|
||||
func parseCreateRegistryCmd(cmd *cobra.Command, args []string, flags *regCreateFlags, ppFlags *regCreatePreProcessedFlags) (*k3d.Registry, []*k3d.Cluster) {
|
||||
|
||||
// --cluster
|
||||
clusters := []*k3d.Cluster{}
|
||||
for _, name := range ppFlags.Clusters {
|
||||
clusters = append(clusters,
|
||||
&k3d.Cluster{
|
||||
Name: name,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// --port
|
||||
exposePort, err := cliutil.ParsePortExposureSpec(ppFlags.Port, k3d.DefaultRegistryPort)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to parse registry port")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// set the name for the registry node
|
||||
registryName := ""
|
||||
if len(args) > 0 {
|
||||
registryName = fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, args[0])
|
||||
}
|
||||
|
||||
return &k3d.Registry{Host: registryName, Image: flags.Image, ExposureOpts: *exposePort}, clusters
|
||||
}
|
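After the registry node is up, the command builds a host:port string and substitutes it into the how-to template shown further up. A trimmed-down sketch of that substitution (host and port below are invented sample values; upstream they come from reg.Host and reg.ExposureOpts.Binding.HostPort):

package main

import "fmt"

// shortened copy of the how-to template above; every %s is the registry address
const helptext = `# 1. create a new cluster that uses this registry
k3d cluster create --registry-use %s

# 2. tag an existing local image to be pushed to the registry
docker tag nginx:latest %s/mynginx:v0.1
`

func main() {
	// sample values; upstream these come from reg.Host and reg.ExposureOpts.Binding.HostPort
	host, hostPort := "k3d-myregistry", "5111"
	regString := fmt.Sprintf("%s:%s", host, hostPort)
	fmt.Printf(helptext, regString, regString)
}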
@ -1,102 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type registryDeleteFlags struct {
|
||||
All bool
|
||||
}
|
||||
|
||||
// NewCmdRegistryDelete returns a new cobra command
|
||||
func NewCmdRegistryDelete() *cobra.Command {
|
||||
|
||||
flags := registryDeleteFlags{}
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "delete (NAME | --all)",
|
||||
Short: "Delete registry/registries.",
|
||||
Long: `Delete registry/registries.`,
|
||||
Aliases: []string{"del", "rm"},
|
||||
ValidArgsFunction: util.ValidArgsAvailableRegistries,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
nodes := parseRegistryDeleteCmd(cmd, args, &flags)
|
||||
|
||||
if len(nodes) == 0 {
|
||||
l.Log().Infoln("No registries found")
|
||||
} else {
|
||||
for _, node := range nodes {
|
||||
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolVarP(&flags.All, "all", "a", false, "Delete all existing registries")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseRegistryDeleteCmd parses the command input into variables required to delete nodes
|
||||
func parseRegistryDeleteCmd(cmd *cobra.Command, args []string, flags *registryDeleteFlags) []*k3d.Node {
|
||||
|
||||
var nodes []*k3d.Node
|
||||
var err error
|
||||
|
||||
if flags.All {
|
||||
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
if !flags.All && len(args) < 1 {
|
||||
l.Log().Fatalln("Expecting at least one registry name if `--all` is not set")
|
||||
}
|
||||
|
||||
for _, name := range args {
|
||||
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
nodes = client.NodeFilterByRoles(nodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})
|
||||
|
||||
return nodes
|
||||
}
|
@ -1,113 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type registryListFlags struct {
|
||||
noHeader bool
|
||||
output string
|
||||
}
|
||||
|
||||
// NewCmdRegistryList creates a new cobra command
|
||||
func NewCmdRegistryList() *cobra.Command {
|
||||
registryListFlags := registryListFlags{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "list [NAME [NAME...]]",
|
||||
Aliases: []string{"ls", "get"},
|
||||
Short: "List registries",
|
||||
Long: `List registries.`,
|
||||
Args: cobra.MinimumNArgs(0), // 0 or more; 0 = all
|
||||
ValidArgsFunction: util.ValidArgsAvailableRegistries,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var existingNodes []*k3d.Node
|
||||
|
||||
nodes := []*k3d.Node{}
|
||||
for _, name := range args {
|
||||
nodes = append(nodes, &k3d.Node{
|
||||
Name: name,
|
||||
})
|
||||
}
|
||||
|
||||
if len(nodes) == 0 { // Option a) no name specified -> get all registries
|
||||
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
existingNodes = append(existingNodes, found...)
|
||||
} else { // Option b) registry name(s) specified -> get specific registries
|
||||
for _, node := range nodes {
|
||||
l.Log().Tracef("Node %s", node.Name)
|
||||
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
existingNodes = append(existingNodes, found)
|
||||
}
|
||||
}
|
||||
existingNodes = client.NodeFilterByRoles(existingNodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})
|
||||
|
||||
// print existing registries
|
||||
headers := &[]string{}
|
||||
if !registryListFlags.noHeader {
|
||||
headers = &[]string{"NAME", "ROLE", "CLUSTER", "STATUS"}
|
||||
}
|
||||
|
||||
util.PrintNodes(existingNodes, registryListFlags.output,
|
||||
headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) {
|
||||
cluster := "*"
|
||||
if _, ok := node.RuntimeLabels[k3d.LabelClusterName]; ok {
|
||||
cluster = node.RuntimeLabels[k3d.LabelClusterName]
|
||||
}
|
||||
fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n",
|
||||
strings.TrimPrefix(node.Name, "/"),
|
||||
string(node.Role),
|
||||
cluster,
|
||||
node.State.Status,
|
||||
)
|
||||
}),
|
||||
)
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolVar(&registryListFlags.noHeader, "no-headers", false, "Disable headers")
cmd.Flags().StringVarP(&registryListFlags.output, "output", "o", "", "Output format. One of: json|yaml")
|
||||
|
||||
// add subcommands
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
// NewCmdRegistryStart creates a new cobra command
|
||||
func NewCmdRegistryStart() *cobra.Command {
|
||||
return &cobra.Command{}
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
// NewCmdRegistryStop creates a new cobra command
|
||||
func NewCmdRegistryStop() *cobra.Command {
|
||||
return &cobra.Command{}
|
||||
}
|
288
cmd/root.go
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -25,187 +25,152 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/cluster"
|
||||
cfg "github.com/rancher/k3d/v5/cmd/config"
|
||||
"github.com/rancher/k3d/v5/cmd/debug"
|
||||
"github.com/rancher/k3d/v5/cmd/image"
|
||||
"github.com/rancher/k3d/v5/cmd/kubeconfig"
|
||||
"github.com/rancher/k3d/v5/cmd/node"
|
||||
"github.com/rancher/k3d/v5/cmd/registry"
|
||||
cliutil "github.com/rancher/k3d/v5/cmd/util"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v5/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/rancher/k3d/v3/cmd/cluster"
|
||||
"github.com/rancher/k3d/v3/cmd/image"
|
||||
"github.com/rancher/k3d/v3/cmd/kubeconfig"
|
||||
"github.com/rancher/k3d/v3/cmd/node"
|
||||
cliutil "github.com/rancher/k3d/v3/cmd/util"
|
||||
"github.com/rancher/k3d/v3/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v3/version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/writer"
|
||||
)
|
||||
|
||||
// RootFlags describes a struct that holds flags that can be set on root level of the command
|
||||
type RootFlags struct {
|
||||
debugLogging bool
|
||||
traceLogging bool
|
||||
timestampedLogging bool
|
||||
version bool
|
||||
debugLogging bool
|
||||
traceLogging bool
|
||||
version bool
|
||||
}
|
||||
|
||||
var flags = RootFlags{}
|
||||
|
||||
func NewCmdK3d() *cobra.Command {
|
||||
// var cfgFile string
|
||||
|
||||
// rootCmd represents the base command when called without any subcommands
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "k3d",
|
||||
Short: "https://k3d.io/ -> Run k3s in Docker!",
|
||||
Long: `https://k3d.io/
|
||||
// rootCmd represents the base command when called without any subcommands
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "k3d",
|
||||
Short: "https://k3d.io/ -> Run k3s in Docker!",
|
||||
Long: `https://k3d.io/
|
||||
k3d is a wrapper CLI that helps you to easily create k3s clusters inside docker.
|
||||
Nodes of a k3d cluster are docker containers running a k3s image.
|
||||
All Nodes of a k3d cluster are part of the same docker network.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if flags.version {
|
||||
printVersion()
|
||||
} else {
|
||||
if err := cmd.Usage(); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if flags.version {
|
||||
printVersion()
|
||||
} else {
|
||||
if err := cmd.Usage(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
rootCmd.PersistentFlags().BoolVar(&flags.debugLogging, "verbose", false, "Enable verbose output (debug logging)")
|
||||
rootCmd.PersistentFlags().BoolVar(&flags.traceLogging, "trace", false, "Enable super verbose output (trace logging)")
|
||||
rootCmd.PersistentFlags().BoolVar(&flags.timestampedLogging, "timestamps", false, "Enable Log timestamps")
|
||||
|
||||
// add local flags
|
||||
rootCmd.Flags().BoolVar(&flags.version, "version", false, "Show k3d and default k3s version")
|
||||
|
||||
// add subcommands
|
||||
rootCmd.AddCommand(NewCmdCompletion(rootCmd),
|
||||
cluster.NewCmdCluster(),
|
||||
kubeconfig.NewCmdKubeconfig(),
|
||||
node.NewCmdNode(),
|
||||
image.NewCmdImage(),
|
||||
cfg.NewCmdConfig(),
|
||||
registry.NewCmdRegistry(),
|
||||
debug.NewCmdDebug(),
|
||||
&cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Show k3d and default k3s version",
|
||||
Long: "Show k3d and default k3s version",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
printVersion()
|
||||
},
|
||||
},
|
||||
&cobra.Command{
|
||||
Use: "runtime-info",
|
||||
Short: "Show runtime information",
|
||||
Long: "Show some information about the runtime environment (e.g. docker info)",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
info, err := runtimes.SelectedRuntime.Info()
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
err = yaml.NewEncoder(os.Stdout).Encode(info)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
Hidden: true,
|
||||
})
|
||||
|
||||
// Init
|
||||
cobra.OnInitialize(initLogging, initRuntime)
|
||||
|
||||
return rootCmd
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
cmd := NewCmdK3d()
|
||||
if len(os.Args) > 1 {
|
||||
parts := os.Args[1:]
|
||||
// Check if it's a built-in command, else try to execute it as a plugin
|
||||
if _, _, err := cmd.Find(parts); err != nil {
|
||||
if _, _, err := rootCmd.Find(parts); err != nil {
|
||||
pluginFound, err := cliutil.HandlePlugin(context.Background(), parts)
|
||||
if err != nil {
|
||||
l.Log().Errorf("Failed to execute plugin '%+v'", parts)
|
||||
l.Log().Fatalln(err)
|
||||
log.Errorf("Failed to execute plugin '%+v'", parts)
|
||||
log.Fatalln(err)
|
||||
} else if pluginFound {
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := cmd.Execute(); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(initLogging, initRuntime)
|
||||
|
||||
// add persistent flags (present to all subcommands)
|
||||
// rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.k3d/config.yaml)")
|
||||
rootCmd.PersistentFlags().BoolVar(&flags.debugLogging, "verbose", false, "Enable verbose output (debug logging)")
|
||||
rootCmd.PersistentFlags().BoolVar(&flags.traceLogging, "trace", false, "Enable super verbose output (trace logging)")
|
||||
|
||||
// add local flags
|
||||
rootCmd.Flags().BoolVar(&flags.version, "version", false, "Show k3d and default k3s version")
|
||||
|
||||
// add subcommands
|
||||
rootCmd.AddCommand(NewCmdCompletion())
|
||||
rootCmd.AddCommand(cluster.NewCmdCluster())
|
||||
rootCmd.AddCommand(kubeconfig.NewCmdKubeconfig())
|
||||
rootCmd.AddCommand(node.NewCmdNode())
|
||||
rootCmd.AddCommand(image.NewCmdImage())
|
||||
|
||||
rootCmd.AddCommand(&cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Show k3d and default k3s version",
|
||||
Long: "Show k3d and default k3s version",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
printVersion()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// initLogging initializes the logger
|
||||
func initLogging() {
|
||||
if flags.traceLogging {
|
||||
l.Log().SetLevel(logrus.TraceLevel)
|
||||
log.SetLevel(log.TraceLevel)
|
||||
} else if flags.debugLogging {
|
||||
l.Log().SetLevel(logrus.DebugLevel)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
} else {
|
||||
switch logLevel := strings.ToUpper(os.Getenv("LOG_LEVEL")); logLevel {
|
||||
case "TRACE":
|
||||
l.Log().SetLevel(logrus.TraceLevel)
|
||||
log.SetLevel(log.TraceLevel)
|
||||
case "DEBUG":
|
||||
l.Log().SetLevel(logrus.DebugLevel)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
case "WARN":
|
||||
l.Log().SetLevel(logrus.WarnLevel)
|
||||
log.SetLevel(log.WarnLevel)
|
||||
case "ERROR":
|
||||
l.Log().SetLevel(logrus.ErrorLevel)
|
||||
log.SetLevel(log.ErrorLevel)
|
||||
default:
|
||||
l.Log().SetLevel(logrus.InfoLevel)
|
||||
log.SetLevel(log.InfoLevel)
|
||||
}
|
||||
}
|
||||
l.Log().SetOutput(io.Discard)
|
||||
l.Log().AddHook(&writer.Hook{
|
||||
log.SetOutput(ioutil.Discard)
|
||||
log.AddHook(&writer.Hook{
|
||||
Writer: os.Stderr,
|
||||
LogLevels: []logrus.Level{
|
||||
logrus.PanicLevel,
|
||||
logrus.FatalLevel,
|
||||
logrus.ErrorLevel,
|
||||
logrus.WarnLevel,
|
||||
LogLevels: []log.Level{
|
||||
log.PanicLevel,
|
||||
log.FatalLevel,
|
||||
log.ErrorLevel,
|
||||
log.WarnLevel,
|
||||
},
|
||||
})
|
||||
l.Log().AddHook(&writer.Hook{
|
||||
log.AddHook(&writer.Hook{
|
||||
Writer: os.Stdout,
|
||||
LogLevels: []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
logrus.DebugLevel,
|
||||
logrus.TraceLevel,
|
||||
LogLevels: []log.Level{
|
||||
log.InfoLevel,
|
||||
log.DebugLevel,
|
||||
log.TraceLevel,
|
||||
},
|
||||
})
|
||||
|
||||
formatter := &logrus.TextFormatter{
log.SetFormatter(&log.TextFormatter{
ForceColors: true,
}

if flags.timestampedLogging || os.Getenv("LOG_TIMESTAMPS") != "" {
formatter.FullTimestamp = true
}

l.Log().SetFormatter(formatter)

})
}
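initLogging discards the logger's default output and re-attaches it through two writer hooks, so warnings and errors land on stderr while info, debug and trace land on stdout. A self-contained sketch of that split using logrus directly (the upstream code reaches the logger through its pkg/logger wrapper):

package main

import (
	"io"
	"os"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/writer"
)

func main() {
	log := logrus.New()
	// drop the default output and route everything through level-scoped hooks
	log.SetOutput(io.Discard)
	log.AddHook(&writer.Hook{ // warnings and worse go to stderr
		Writer:    os.Stderr,
		LogLevels: []logrus.Level{logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, logrus.WarnLevel},
	})
	log.AddHook(&writer.Hook{ // info, debug and trace go to stdout
		Writer:    os.Stdout,
		LogLevels: []logrus.Level{logrus.InfoLevel, logrus.DebugLevel, logrus.TraceLevel},
	})

	log.Warnln("this line ends up on stderr")
	log.Infoln("this line ends up on stdout")
}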
func initRuntime() {
|
||||
runtime, err := runtimes.GetRuntime("docker")
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
log.Fatalln(err)
|
||||
}
|
||||
runtimes.SelectedRuntime = runtime
|
||||
if rtinfo, err := runtime.Info(); err == nil {
|
||||
l.Log().Debugf("Runtime Info:\n%+v", rtinfo)
|
||||
}
|
||||
log.Debugf("Selected runtime is '%T'", runtimes.SelectedRuntime)
|
||||
}
|
||||
|
||||
func printVersion() {
|
||||
@ -213,82 +178,35 @@ func printVersion() {
|
||||
fmt.Printf("k3s version %s (default)\n", version.K3sVersion)
|
||||
}
|
||||
|
||||
func generateFishCompletion(writer io.Writer) error {
|
||||
return rootCmd.GenFishCompletion(writer, true)
|
||||
}
|
||||
|
||||
// Completion
|
||||
var completionFunctions = map[string]func(io.Writer) error{
|
||||
"bash": rootCmd.GenBashCompletion,
|
||||
"zsh": rootCmd.GenZshCompletion,
|
||||
"psh": rootCmd.GenPowerShellCompletion,
|
||||
"powershell": rootCmd.GenPowerShellCompletion,
|
||||
"fish": generateFishCompletion,
|
||||
}
|
||||
|
||||
// NewCmdCompletion creates a new completion command
|
||||
func NewCmdCompletion(rootCmd *cobra.Command) *cobra.Command {
|
||||
|
||||
completionFunctions := map[string]func(io.Writer) error{
|
||||
"bash": rootCmd.GenBashCompletion,
|
||||
"zsh": func(writer io.Writer) error {
|
||||
if err := rootCmd.GenZshCompletion(writer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintf(writer, "\n# source completion file\ncompdef _k3d k3d\n")
|
||||
|
||||
return nil
|
||||
},
|
||||
"psh": rootCmd.GenPowerShellCompletion,
|
||||
"powershell": rootCmd.GenPowerShellCompletionWithDesc,
|
||||
"fish": func(writer io.Writer) error {
|
||||
return rootCmd.GenFishCompletion(writer, true)
|
||||
},
|
||||
}
|
||||
|
||||
func NewCmdCompletion() *cobra.Command {
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "completion SHELL",
|
||||
Short: "Generate completion scripts for [bash, zsh, fish, powershell | psh]",
|
||||
Long: `To load completions:
|
||||
|
||||
Bash:
|
||||
|
||||
$ source <(k3d completion bash)
|
||||
|
||||
# To load completions for each session, execute once:
|
||||
# Linux:
|
||||
$ k3d completion bash > /etc/bash_completion.d/k3d
|
||||
# macOS:
|
||||
$ k3d completion bash > /usr/local/etc/bash_completion.d/k3d
|
||||
|
||||
Zsh:
|
||||
|
||||
# If shell completion is not already enabled in your environment,
|
||||
# you will need to enable it. You can execute the following once:
|
||||
|
||||
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
|
||||
|
||||
# To load completions for each session, execute once:
|
||||
$ k3d completion zsh > "${fpath[1]}/k3d"
|
||||
|
||||
# You will need to start a new shell for this setup to take effect.
|
||||
|
||||
fish:
|
||||
|
||||
$ k3d completion fish | source
|
||||
|
||||
# To load completions for each session, execute once:
|
||||
$ k3d completion fish > ~/.config/fish/completions/k3d.fish
|
||||
|
||||
PowerShell:
|
||||
|
||||
PS> k3d completion powershell | Out-String | Invoke-Expression
|
||||
|
||||
# To load completions for every new session, run:
|
||||
PS> k3d completion powershell > k3d.ps1
|
||||
# and source this file from your PowerShell profile.
|
||||
`,
|
||||
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
|
||||
ArgAliases: []string{"psh"},
|
||||
DisableFlagsInUseLine: true,
|
||||
Args: cobra.ExactValidArgs(1),
|
||||
Long: `Generate completion scripts for [bash, zsh, fish, powershell | psh]`,
|
||||
Args: cobra.ExactArgs(1), // TODO: NewCmdCompletion: add support for 0 args = auto detection
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if completionFunc, ok := completionFunctions[args[0]]; ok {
|
||||
if err := completionFunc(os.Stdout); err != nil {
|
||||
l.Log().Fatalf("Failed to generate completion script for shell '%s'", args[0])
|
||||
if f, ok := completionFunctions[args[0]]; ok {
|
||||
if err := f(os.Stdout); err != nil {
|
||||
log.Fatalf("Failed to generate completion script for shell '%s'", args[0])
|
||||
}
|
||||
return
|
||||
}
|
||||
l.Log().Fatalf("Shell '%s' not supported for completion", args[0])
|
||||
log.Fatalf("Shell '%s' not supported for completion", args[0])
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -25,10 +25,10 @@ import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
k3dcluster "github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
k3dcluster "github.com/rancher/k3d/v3/pkg/cluster"
|
||||
"github.com/rancher/k3d/v3/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v3/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@ -39,7 +39,7 @@ func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete st
|
||||
var clusters []*k3d.Cluster
|
||||
clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to get list of clusters for shell completion")
|
||||
log.Errorln("Failed to get list of clusters for shell completion")
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
@ -64,41 +64,14 @@ func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete strin
|
||||
var nodes []*k3d.Node
|
||||
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to get list of nodes for shell completion")
|
||||
log.Errorln("Failed to get list of nodes for shell completion")
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
nodeLoop:
|
||||
for _, node := range nodes {
|
||||
for _, arg := range args {
|
||||
if arg == node.Name { // only nodes, that are not in the args yet
|
||||
continue nodeLoop
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(node.Name, toComplete) {
|
||||
completions = append(completions, node.Name)
|
||||
}
|
||||
}
|
||||
return completions, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// ValidArgsAvailableRegistries is used for shell completions: proposes the list of existing registries
|
||||
func ValidArgsAvailableRegistries(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
|
||||
var completions []string
|
||||
var nodes []*k3d.Node
|
||||
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to get list of nodes for shell completion")
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
nodes = k3dcluster.NodeFilterByRoles(nodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})
|
||||
|
||||
nodeLoop:
|
||||
for _, node := range nodes {
|
||||
for _, arg := range args {
|
||||
if arg == node.Name { // only nodes, that are not in the args yet
|
||||
if arg == node.Name { // only clusters, that are not in the args yet
|
||||
continue nodeLoop
|
||||
}
|
||||
}
|
||||
|
@ -1,97 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/config"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func InitViperWithConfigFile(cfgViper *viper.Viper, configFile string) error {
|
||||
|
||||
// viper for the general config (file, env and non pre-processed flags)
|
||||
cfgViper.SetEnvPrefix("K3D")
|
||||
cfgViper.AutomaticEnv()
|
||||
|
||||
cfgViper.SetConfigType("yaml")
|
||||
|
||||
// Set config file, if specified
|
||||
if configFile != "" {
|
||||
|
||||
if _, err := os.Stat(configFile); err != nil {
|
||||
l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
// create temporary file to expand environment variables in the config without writing that back to the original file
|
||||
// we're doing it here, because this happens just before absolutely all other processing
|
||||
tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile)))
|
||||
if err != nil {
|
||||
l.Log().Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
|
||||
}
|
||||
defer tmpfile.Close()
|
||||
|
||||
originalcontent, err := os.ReadFile(configFile)
|
||||
if err != nil {
|
||||
l.Log().Fatalf("error reading config file %s: %v", configFile, err)
|
||||
}
|
||||
expandedcontent := os.ExpandEnv(string(originalcontent))
|
||||
if _, err := tmpfile.WriteString(expandedcontent); err != nil {
|
||||
l.Log().Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
|
||||
}
|
||||
|
||||
// use temp file with expanded variables
|
||||
cfgViper.SetConfigFile(tmpfile.Name())
|
||||
|
||||
// try to read config into memory (viper map structure)
|
||||
if err := cfgViper.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
|
||||
}
|
||||
// config file found but some other error happened
|
||||
l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
|
||||
if err != nil {
|
||||
l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
if err := config.ValidateSchemaFile(tmpfile.Name(), schema); err != nil {
|
||||
l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
l.Log().Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
|
||||
}
|
||||
if l.Log().GetLevel() >= logrus.DebugLevel {
|
||||
c, _ := yaml.Marshal(cfgViper.AllSettings())
|
||||
l.Log().Debugf("Configuration:\n%s", c)
|
||||
}
|
||||
return nil
|
||||
}
|
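InitViperWithConfigFile expands environment variables by writing an expanded copy of the config to a temporary file before viper reads it. A condensed sketch of the same idea that hands the expanded YAML to viper from an in-memory buffer instead (the file name is a made-up example):

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	// hypothetical config file path, used only for this sketch
	raw, err := os.ReadFile("k3d-config.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// expand $VAR / ${VAR} references before viper ever parses the YAML,
	// which is what the temp-file copy above achieves without touching the original file
	expanded := os.ExpandEnv(string(raw))

	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewBufferString(expanded)); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Printf("apiVersion=%s kind=%s\n", v.GetString("apiVersion"), v.GetString("kind"))
}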
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -23,11 +23,21 @@ package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
k3d "github.com/rancher/k3d/v3/pkg/types"
|
||||
|
||||
"github.com/rancher/k3d/v3/pkg/util"
|
||||
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// Regexp pattern to match node filters
|
||||
var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
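To see how this pattern decomposes a node filter, here is a small standalone demo that runs the same regular expression over a few arbitrary sample filters; the name-to-submatch mapping imitates what util.MapSubexpNames does further down:

package main

import (
	"fmt"
	"regexp"
)

var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)

func main() {
	for _, filter := range []string{"server[0]", "agent[1,2]", "agent[1:3]", "all"} {
		match := filterRegexp.FindStringSubmatch(filter)
		if match == nil {
			fmt.Printf("%-12s -> no match\n", filter)
			continue
		}
		// map capture-group names to their submatches
		groups := map[string]string{}
		for i, name := range filterRegexp.SubexpNames() {
			if name != "" {
				groups[name] = match[i]
			}
		}
		fmt.Printf("%-12s -> group=%q list=%q range=%q wildcard=%q\n",
			filter, groups["group"], groups["subsetList"], groups["subsetRange"], groups["subsetWildcard"])
	}
}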
|
||||
|
||||
// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
|
||||
func SplitFiltersFromFlag(flag string) (string, []string, error) {
|
||||
|
||||
@ -50,10 +60,10 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
|
||||
// Case 1.1: Escaped backslash
|
||||
if strings.HasSuffix(it, "\\\\") {
|
||||
it = strings.TrimSuffix(it, "\\")
|
||||
l.Log().Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
|
||||
log.Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
|
||||
} else {
|
||||
// Case 1.2: Unescaped backslash -> Escaping the '@' -> remove suffix and append it to buffer, followed by the escaped @ sign
|
||||
l.Log().Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
|
||||
log.Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
|
||||
buffer += strings.TrimSuffix(it, "\\") + "@"
|
||||
continue
|
||||
}
|
||||
@ -77,3 +87,144 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
|
||||
return newsplit[0], strings.Split(newsplit[1], ";"), nil
|
||||
|
||||
}
|
||||
|
||||
// FilterNodes takes a string filter to return a filtered list of nodes
|
||||
func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
|
||||
|
||||
if len(filters) == 0 || len(filters[0]) == 0 {
|
||||
log.Warnln("No node filter specified")
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// map roles to subsets
|
||||
serverNodes := []*k3d.Node{}
|
||||
agentNodes := []*k3d.Node{}
|
||||
var serverlb *k3d.Node
|
||||
for _, node := range nodes {
|
||||
if node.Role == k3d.ServerRole {
|
||||
serverNodes = append(serverNodes, node)
|
||||
} else if node.Role == k3d.AgentRole {
|
||||
agentNodes = append(agentNodes, node)
|
||||
} else if node.Role == k3d.LoadBalancerRole {
|
||||
serverlb = node
|
||||
}
|
||||
}
|
||||
|
||||
filteredNodes := []*k3d.Node{}
|
||||
set := make(map[*k3d.Node]struct{})
|
||||
|
||||
// range over all instances of group[subset] specs
|
||||
for _, filter := range filters {
|
||||
|
||||
// match regex with capturing groups
|
||||
match := filterRegexp.FindStringSubmatch(filter)
|
||||
|
||||
if len(match) == 0 {
|
||||
return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", filter)
|
||||
}
|
||||
|
||||
// map capturing group names to submatches
|
||||
submatches := util.MapSubexpNames(filterRegexp.SubexpNames(), match)
|
||||
|
||||
// if one of the filters is 'all', we only return this and drop all others
|
||||
if submatches["group"] == "all" {
|
||||
// TODO: filterNodes: only log if really more than one is specified
|
||||
log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// Choose the group of nodes to operate on
|
||||
groupNodes := []*k3d.Node{}
|
||||
if submatches["group"] == string(k3d.ServerRole) {
|
||||
groupNodes = serverNodes
|
||||
} else if submatches["group"] == string(k3d.AgentRole) {
|
||||
groupNodes = agentNodes
|
||||
} else if submatches["group"] == string(k3d.LoadBalancerRole) {
|
||||
filteredNodes = append(filteredNodes, serverlb)
|
||||
return filteredNodes, nil // early exit if filtered group is the loadbalancer
|
||||
}
|
||||
|
||||
/* Option 1) subset defined by list */
|
||||
if submatches["subsetList"] != "" {
|
||||
for _, index := range strings.Split(submatches["subsetList"], ",") {
|
||||
if index != "" {
|
||||
num, err := strconv.Atoi(index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to convert subset number to integer in '%s'", filter)
|
||||
}
|
||||
if num < 0 || num >= len(groupNodes) {
|
||||
return nil, fmt.Errorf("Index out of range: index '%d' < 0 or > number of available nodes in filter '%s'", num, filter)
|
||||
}
|
||||
if _, exists := set[groupNodes[num]]; !exists {
|
||||
filteredNodes = append(filteredNodes, groupNodes[num])
|
||||
set[groupNodes[num]] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Option 2) subset defined by range */
|
||||
} else if submatches["subsetRange"] != "" {
|
||||
|
||||
/*
|
||||
* subset specified by a range 'START:END', where each side is optional
|
||||
*/
|
||||
|
||||
split := strings.Split(submatches["subsetRange"], ":")
|
||||
if len(split) != 2 {
|
||||
return nil, fmt.Errorf("Failed to parse subset range in '%s'", filter)
|
||||
}
|
||||
|
||||
start := 0
|
||||
end := len(groupNodes) - 1
|
||||
|
||||
var err error
|
||||
|
||||
if split[0] != "" {
|
||||
start, err = strconv.Atoi(split[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to convert subset range start to integer in '%s'", filter)
|
||||
}
|
||||
if start < 0 || start >= len(groupNodes) {
|
||||
return nil, fmt.Errorf("Invalid subset range: start < 0 or > number of available nodes in '%s'", filter)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if split[1] != "" {
|
||||
end, err = strconv.Atoi(split[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to convert subset range start to integer in '%s'", filter)
|
||||
}
|
||||
if end < start || end >= len(groupNodes) {
|
||||
return nil, fmt.Errorf("Invalid subset range: end < start or > number of available nodes in '%s'", filter)
|
||||
}
|
||||
}
|
||||
|
||||
for i := start; i <= end; i++ {
|
||||
if _, exists := set[groupNodes[i]]; !exists {
|
||||
filteredNodes = append(filteredNodes, groupNodes[i])
|
||||
set[groupNodes[i]] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
/* Option 3) subset defined by wildcard */
|
||||
} else if submatches["subsetWildcard"] == "*" {
|
||||
/*
|
||||
* '*' = all nodes
|
||||
*/
|
||||
for _, node := range groupNodes {
|
||||
if _, exists := set[node]; !exists {
|
||||
filteredNodes = append(filteredNodes, node)
|
||||
set[node] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
/* Option X) invalid/unknown subset */
|
||||
} else {
|
||||
return nil, fmt.Errorf("Failed to parse node specifiers: unknown subset in '%s'", filter)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return filteredNodes, nil
|
||||
}
|
||||
|
@ -1,89 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type NodePrinter interface {
|
||||
Print(*tabwriter.Writer, *k3d.Node)
|
||||
}
|
||||
|
||||
type NodePrinterFunc func(*tabwriter.Writer, *k3d.Node)
|
||||
|
||||
func (npf NodePrinterFunc) Print(writter *tabwriter.Writer, node *k3d.Node) {
|
||||
npf(writter, node)
|
||||
}
|
||||
|
||||
// PrintNodes prints a list of nodes, either as a table or as a JSON/YAML listing
|
||||
func PrintNodes(nodes []*k3d.Node, outputFormat string, headers *[]string, nodePrinter NodePrinter) {
|
||||
outputFormat = strings.ToLower(outputFormat)
|
||||
|
||||
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
|
||||
defer tabwriter.Flush()
|
||||
|
||||
if outputFormat != "json" && outputFormat != "yaml" {
|
||||
if headers != nil {
|
||||
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(*headers, "\t"))
|
||||
if err != nil {
|
||||
l.Log().Fatalln("Failed to print headers")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
return nodes[i].Name < nodes[j].Name
|
||||
})
|
||||
|
||||
if outputFormat == "json" || outputFormat == "yaml" {
|
||||
var b []byte
|
||||
var err error
|
||||
|
||||
switch outputFormat {
|
||||
case "json":
|
||||
b, err = json.Marshal(nodes)
|
||||
case "yaml":
|
||||
b, err = yaml.Marshal(nodes)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
} else {
|
||||
for _, node := range nodes {
|
||||
if !(outputFormat == "json" || outputFormat == "yaml") {
|
||||
nodePrinter.Print(tabwriter, node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -28,7 +28,7 @@ import (
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
k3d "github.com/rancher/k3d/v3/pkg/types"
|
||||
)
|
||||
|
||||
// HandlePlugin takes care of finding and executing a plugin based on the longest prefix
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -24,82 +24,60 @@ package util
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/rancher/k3d/v5/pkg/util"
|
||||
k3d "github.com/rancher/k3d/v3/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var apiPortRegexp = regexp.MustCompile(`^(?P<hostref>(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|(?P<hostname>\S+):)?(?P<port>(\d{1,5}|random))$`)
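// Accepted formats are "[(HostIP|HostName):]HostPort", e.g. "6443", "random", "0.0.0.0:6443" or "k3d.example.com:6443" (illustrative)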
|
||||
// ParseAPIPort parses/validates a string to create an exposeAPI struct from it
|
||||
func ParseAPIPort(portString string) (k3d.ExposeAPI, error) {
|
||||
|
||||
// ParsePortExposureSpec parses/validates a string to create an exposePort struct from it
|
||||
func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureOpts, error) {
|
||||
var exposeAPI k3d.ExposeAPI
|
||||
|
||||
match := apiPortRegexp.FindStringSubmatch(exposedPortSpec)
|
||||
|
||||
if len(match) == 0 {
|
||||
return nil, fmt.Errorf("Failed to parse Port Exposure specification '%s': Format must be [(HostIP|HostName):]HostPort", exposedPortSpec)
|
||||
split := strings.Split(portString, ":")
|
||||
if len(split) > 2 {
|
||||
log.Errorln("Failed to parse API Port specification")
|
||||
return exposeAPI, fmt.Errorf("api-port format error")
|
||||
}
|
||||
|
||||
submatches := util.MapSubexpNames(apiPortRegexp.SubexpNames(), match)
|
||||
|
||||
// no port specified (or not matched via regex)
|
||||
if submatches["port"] == "" {
|
||||
return nil, fmt.Errorf("Failed to find port in Port Exposure spec '%s'", exposedPortSpec)
|
||||
}
|
||||
|
||||
api := &k3d.ExposureOpts{}
|
||||
|
||||
// check if there's a host reference
|
||||
if submatches["hostname"] != "" {
|
||||
l.Log().Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
|
||||
addrs, err := net.LookupHost(submatches["hostname"])
|
||||
if len(split) == 1 {
|
||||
exposeAPI = k3d.ExposeAPI{Port: split[0]}
|
||||
} else {
|
||||
// Make sure 'host' can be resolved to an IP address
|
||||
addrs, err := net.LookupHost(split[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to lookup host '%s' specified for Port Exposure: %+v", submatches["hostname"], err)
|
||||
return exposeAPI, err
|
||||
}
|
||||
api.Host = submatches["hostname"]
|
||||
submatches["hostip"] = addrs[0] // set hostip to the resolved address
|
||||
exposeAPI = k3d.ExposeAPI{Host: split[0], HostIP: addrs[0], Port: split[1]}
|
||||
}
|
||||
|
||||
realPortString := ""
|
||||
|
||||
if submatches["hostip"] == "" {
|
||||
submatches["hostip"] = k3d.DefaultAPIHost
|
||||
}
|
||||
|
||||
// start with the IP, if there is any
|
||||
if submatches["hostip"] != "" {
|
||||
realPortString += submatches["hostip"] + ":"
|
||||
}
|
||||
|
||||
// port: get a free one if there's none defined or set to random
|
||||
if submatches["port"] == "" || submatches["port"] == "random" {
|
||||
l.Log().Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
|
||||
// Verify 'port' is an integer and within port ranges
|
||||
if exposeAPI.Port == "" || exposeAPI.Port == "random" {
|
||||
log.Debugf("API-Port Mapping didn't specify hostPort, choosing one randomly...")
|
||||
freePort, err := GetFreePort()
|
||||
if err != nil || freePort == 0 {
|
||||
l.Log().Warnf("Failed to get random free port: %+v", err)
|
||||
l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
|
||||
submatches["port"] = internalPort
|
||||
log.Warnf("Failed to get random free port:\n%+v", err)
|
||||
log.Warnf("Falling back to default port %s (may be blocked though)...", k3d.DefaultAPIPort)
|
||||
exposeAPI.Port = k3d.DefaultAPIPort
|
||||
} else {
|
||||
submatches["port"] = strconv.Itoa(freePort)
|
||||
l.Log().Debugf("Got free port for Port Exposure: '%d'", freePort)
|
||||
exposeAPI.Port = strconv.Itoa(freePort)
|
||||
log.Debugf("Got free port for API: '%d'", freePort)
|
||||
}
|
||||
}
|
||||
|
||||
realPortString += fmt.Sprintf("%s:%s/tcp", submatches["port"], internalPort)
|
||||
|
||||
portMapping, err := nat.ParsePortSpec(realPortString)
|
||||
p, err := strconv.Atoi(exposeAPI.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse port spec for Port Exposure '%s': %+v", realPortString, err)
|
||||
log.Errorln("Failed to parse port mapping")
|
||||
return exposeAPI, err
|
||||
}
|
||||
|
||||
api.Port = portMapping[0].Port // there can be only one due to our regexp
|
||||
api.Binding = portMapping[0].Binding
|
||||
if p < 0 || p > 65535 {
|
||||
log.Errorln("Failed to parse API Port specification")
|
||||
return exposeAPI, fmt.Errorf("Port value '%d' out of range", p)
|
||||
}
|
||||
|
||||
return api, nil
|
||||
return exposeAPI, nil
|
||||
|
||||
}
|
||||
|
||||
@ -112,12 +90,14 @@ func ValidatePortMap(portmap string) (string, error) {
|
||||
func GetFreePort() (int, error) {
|
||||
tcpAddress, err := net.ResolveTCPAddr("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to resolve address 'localhost:0': %w", err)
|
||||
log.Errorln("Failed to resolve address")
|
||||
return 0, err
|
||||
}
|
||||
|
||||
tcpListener, err := net.ListenTCP("tcp", tcpAddress)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to create tcp listener: %w", err)
|
||||
log.Errorln("Failed to create TCP Listener")
|
||||
return 0, err
|
||||
}
|
||||
defer tcpListener.Close()
|
||||
|
||||
|
@ -1,35 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
)
|
||||
|
||||
// validateRuntimeLabelKey validates a given label key is not reserved for internal k3d usage
|
||||
func ValidateRuntimeLabelKey(labelKey string) {
|
||||
if strings.HasPrefix(labelKey, "k3s.") || strings.HasPrefix(labelKey, "k3d.") || labelKey == "app" {
|
||||
l.Log().Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -24,17 +24,13 @@ package util
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
rt "runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v3/pkg/runtimes"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
|
||||
// - SRC: source directory/file -> tests: must exist
|
||||
// - DEST: source directory/file -> tests: must be absolute path
|
||||
// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
|
||||
// - SRC: source directory/file -> tests: must exist
|
||||
// - DEST: source directory/file -> tests: must be absolute path
|
||||
@ -44,29 +40,17 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
|
||||
|
||||
// validate 'SRC[:DEST]' substring
|
||||
split := strings.Split(volumeMount, ":")
|
||||
// a volume mapping can have 3 parts separated by a ':' followed by a node filter
|
||||
// [SOURCE:]DEST[:OPT[,OPT]][@NODEFILTER[;NODEFILTER...]]
|
||||
// On Windows the source path needs to be an absolute path which means the path starts with
|
||||
// a drive designator and will also have a ':' in it. So for Windows the maxParts is increased by one.
|
||||
maxParts := 3
|
||||
if rt.GOOS == "windows" {
|
||||
maxParts++
|
||||
}
|
||||
if len(split) < 1 {
|
||||
return "", fmt.Errorf("No volume/path specified")
|
||||
}
|
||||
if len(split) > maxParts {
|
||||
return "", fmt.Errorf("Invalid volume mount '%s': maximal %d ':' allowed", volumeMount, maxParts-1)
|
||||
if len(split) > 3 {
|
||||
return "", fmt.Errorf("Invalid volume mount '%s': maximal 2 ':' allowed", volumeMount)
|
||||
}
|
||||
|
||||
// we only have SRC specified -> DEST = SRC
|
||||
// On Windows the first part of the SRC is the drive letter, so we need to concat the first and second parts to get the path.
|
||||
if len(split) == 1 {
|
||||
src = split[0]
|
||||
dest = src
|
||||
} else if rt.GOOS == "windows" {
|
||||
src = split[0] + ":" + split[1]
|
||||
dest = split[2]
|
||||
} else {
|
||||
src = split[0]
|
||||
dest = split[1]
|
||||
@ -81,7 +65,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
|
||||
}
|
||||
if !isNamedVolume {
|
||||
if _, err := os.Stat(src); err != nil {
|
||||
l.Log().Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
|
||||
log.Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -98,7 +82,7 @@ func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string,
|
||||
func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
|
||||
volumeName, err := runtime.GetVolume(volumeName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to verify named volume: %w", err)
|
||||
return err
|
||||
}
|
||||
if volumeName == "" {
|
||||
return fmt.Errorf("Failed to find named volume '%s'", volumeName)
|
||||
|
@ -1,27 +0,0 @@
|
||||
image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}-dind
|
||||
{{#if build.tags}}
|
||||
tags:
|
||||
{{#each build.tags}}
|
||||
- {{this}}
|
||||
{{/each}}
|
||||
{{/if}}
|
||||
manifests:
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-amd64
|
||||
platform:
|
||||
architecture: amd64
|
||||
os: linux
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm64
|
||||
platform:
|
||||
variant: v8
|
||||
architecture: arm64
|
||||
os: linux
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
|
||||
platform:
|
||||
variant: v7
|
||||
architecture: arm
|
||||
os: linux
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
|
||||
platform:
|
||||
variant: v6
|
||||
architecture: arm
|
||||
os: linux
|
@ -1,15 +0,0 @@
|
||||
# docgen
|
||||
|
||||
Only used to generate the command tree for <https://k3d.io/usage/commands>.
|
||||
|
||||
The code will output files in [`../docs/usage/commands/`](../docs/usage/commands/)
|
||||
|
||||
## Run
|
||||
|
||||
```bash
|
||||
# ensure that you're in the docgen dir, as the relative path to the docs/ dir is hardcoded
|
||||
cd docgen
|
||||
|
||||
# run
|
||||
./run.sh
|
||||
```
|
@ -1,13 +0,0 @@
|
||||
module github.com/rancher/k3d/docgen
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/rancher/k3d/v5 v5.0.0-00010101000000-000000000000
|
||||
github.com/spf13/cobra v1.2.1
|
||||
golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 // indirect
|
||||
golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 // indirect
|
||||
)
|
||||
|
||||
replace github.com/rancher/k3d/v5 => /PATH/TO/YOUR/REPO/DIRECTORY
|
1447 docgen/go.sum (file diff suppressed because it is too large)
@ -1,16 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra/doc"
|
||||
)
|
||||
|
||||
func main() {
|
||||
k3d := cmd.NewCmdK3d()
|
||||
k3d.DisableAutoGenTag = true
|
||||
|
||||
if err := doc.GenMarkdownTree(k3d, "../docs/usage/commands"); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
@ -1,22 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
REPLACE_PLACEHOLDER="/PATH/TO/YOUR/REPO/DIRECTORY"
|
||||
|
||||
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; }
|
||||
|
||||
REPO_DIR=${CURR_DIR%"/docgen"}
|
||||
|
||||
echo "$REPO_DIR"
|
||||
|
||||
sed -i "s%$REPLACE_PLACEHOLDER%$REPO_DIR%" "$CURR_DIR/go.mod"
|
||||
|
||||
go mod tidy
|
||||
|
||||
go mod vendor
|
||||
|
||||
go run ./main.go
|
||||
|
||||
sed -i "s%$REPO_DIR%$REPLACE_PLACEHOLDER%" "$CURR_DIR/go.mod"
|
||||
|
||||
rm -r "$CURR_DIR/vendor"
|
@ -1,6 +1,5 @@
|
||||
nav:
|
||||
arrange:
|
||||
- index.md
|
||||
- usage
|
||||
- design
|
||||
- faq
|
||||
collapse: false
|
||||
- internals
|
||||
- faq
|
@ -1,5 +0,0 @@
|
||||
title: Design
|
||||
nav:
|
||||
- project.md
|
||||
- defaults.md
|
||||
- networking.md
|
@ -1,60 +0,0 @@
|
||||
# Defaults
|
||||
|
||||
## k3d reserved settings
|
||||
|
||||
When you create a K3s cluster in Docker using k3d, we make use of some K3s configuration options, making them "reserved" for k3d.
|
||||
This means that overriding those options with your own may break the cluster setup.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
The following K3s environment variables are used to configure the cluster:
|
||||
|
||||
| Variable | K3d Default | Configurable? |
|----------|-------------|---------------|
| `K3S_URL` | `https://$CLUSTERNAME-server-0:6443` | no |
| `K3S_TOKEN` | random | yes (`--token`) |
| `K3S_KUBECONFIG_OUTPUT` | `/output/kubeconfig.yaml` | no |
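For example, the token is the one setting above that is safe to override at cluster creation time (cluster name and token value below are just placeholders):

```bash
# use a fixed cluster token instead of the randomly generated default
k3d cluster create mycluster --token SuperSecretToken
```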
|
||||
|
||||
## k3d Loadbalancer
|
||||
|
||||
By default, k3d creates an Nginx loadbalancer alongside the clusters it creates to handle the port-forwarding.
|
||||
The loadbalancer can partly be configured using k3d-defined settings.
|
||||
|
||||
| Nginx setting | k3d default | k3d setting |
|---------------|-------------|-------------|
| `proxy_timeout` (default for all server stanzas) | `600` (s) | `settings.defaultProxyTimeout` |
| `worker_connections` | `1024` | `settings.workerConnections` |
|
||||
|
||||
### Overrides
|
||||
|
||||
- Example via CLI: `k3d cluster create --lb-config-override settings.defaultProxyTimeout=900`
|
||||
- Example via Config File:
|
||||
|
||||
```yaml
|
||||
# ... truncated ...
|
||||
k3d:
|
||||
loadbalancer:
|
||||
configOverrides:
|
||||
- settings.workerConnections=2048
|
||||
```
|
||||
|
||||
## Multiple server nodes
|
||||
|
||||
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
|
||||
- the initializing server node will have the `--cluster-init` flag appended
|
||||
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
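For example, the behaviour described in this list kicks in for a cluster like the following (cluster name is arbitrary):

```bash
# server-0 becomes the initializing node (gets --cluster-init),
# server-1 and server-2 join it via --server https://<init-node>:6443
k3d cluster create multiserver --servers 3
```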
|
||||
|
||||
## API-Ports
|
||||
|
||||
- by default, we expose the API-Port (`6443`) by forwarding traffic from the default server loadbalancer (nginx container) to the server node(s)
|
||||
- port `6443` of the loadbalancer is then mapped to a specific (`--api-port` flag) or a random (default) port on the host system
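As a sketch (host IP and port are arbitrary; the optional `HOSTIP:` prefix follows the `[(HostIP|HostName):]HostPort` format used by k3d's port-exposure parsing):

```bash
# pin the API port on the host instead of letting k3d pick a random one
k3d cluster create mycluster --api-port 127.0.0.1:6445
```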
|
||||
|
||||
## Kubeconfig
|
||||
|
||||
- if `--kubeconfig-update-default` is set, we use the default loading rules to get the default kubeconfig:
|
||||
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
|
||||
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
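For example (flag name as referenced above; cluster name is a placeholder):

```bash
# merge the new cluster's connection details into the default kubeconfig right away
k3d cluster create mycluster --kubeconfig-update-default
```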
|
||||
|
||||
## Networking
|
||||
|
||||
- [by default, k3d creates a new (docker) network for every cluster](./networking)
|
@ -1,27 +0,0 @@
|
||||
# Networking
|
||||
|
||||
- Related issues:
|
||||
- [rancher/k3d #220](https://github.com/rancher/k3d/issues/220)
|
||||
|
||||
## Introduction
|
||||
|
||||
By default, k3d creates a new (docker) network for every new cluster.
|
||||
Use the `--network STRING` flag upon creation to connect to an existing network.
|
||||
Existing networks won't be managed by k3d together with the cluster lifecycle.
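A minimal sketch of attaching a cluster to a pre-existing network (network and cluster names are just examples):

```bash
docker network create my-net                     # created and managed outside of k3d
k3d cluster create mycluster --network my-net    # k3d will use, but not delete, this network
```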
|
||||
|
||||
## Connecting to docker "internal"/pre-defined networks
|
||||
|
||||
### `host` network
|
||||
|
||||
When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`), you won't be able to create more than **one server node**.
|
||||
An edge case would be one server node (with agent disabled) and one agent node.
|
||||
|
||||
### `bridge` network
|
||||
|
||||
By default, every network that k3d creates is working in `bridge` mode.
|
||||
But when you try to use `--network bridge` to connect to docker's internal `bridge` network, you may run into issues with grabbing certificates from the API-Server.
|
||||
Single-Node clusters should work though.
|
||||
|
||||
### `none` "network"
|
||||
|
||||
Well.. this doesn't really make sense for k3d anyway ¯\\\_(ツ)\_/¯
|
@ -1,109 +0,0 @@
|
||||
# Project Overview
|
||||
|
||||
## About This Page
|
||||
|
||||
On this page we'll try to give an overview of all the moving bits and pieces in k3d to ease contributions to the project.
|
||||
|
||||
## Directory Overview
|
||||
|
||||
- [`.github/`](https://github.com/rancher/k3d/tree/main/.github)
|
||||
- templates for issues and pull requests
|
||||
- GitHub Action workflow definitions
|
||||
- [`cmd/`](https://github.com/rancher/k3d/tree/main/cmd)
|
||||
- everything related to the actual k3d CLI, like the whole command tree, config initialization, argument parsing, etc.
|
||||
- [`docgen/`](https://github.com/rancher/k3d/tree/main/docgen)
|
||||
- sub-module used to auto-generate the documentation for the CLI commands, which ends up in [`docs/usage/commands/`](https://github.com/rancher/k3d/tree/main/docs/usage/commands)
|
||||
- [`docs/`](https://github.com/rancher/k3d/tree/main/docs)
|
||||
- all the resources used to build [k3d.io](https://k3d.io) using mkdocs
|
||||
- [`pkg/`](<https://github.com/rancher/k3d/tree/main/pkg>)
|
||||
- the place where the magic happens: here you find all the main logic of k3d
|
||||
- all function calls within [`cmd/`](https://github.com/rancher/k3d/tree/main/cmd) that do non-trivial things are imported from here
|
||||
- this (or rather sub-packages) is what other projects would import as a module to work with k3d without using the CLI
|
||||
- [`proxy/`](https://github.com/rancher/k3d/tree/main/proxy)
|
||||
- configuration to build the [`rancher/k3d-proxy`](https://hub.docker.com/r/rancher/k3d-proxy/) container image which is used as a loadbalancer/proxy in front of (almost) every k3d cluster
|
||||
- this is basically just a combination of NGINX with confd and some k3d-specific configuration details
|
||||
- [`tests/`](https://github.com/rancher/k3d/tree/main/tests)
|
||||
- a set of bash scripts used for end-to-end (E2E) tests of k3d
|
||||
- mostly used for all the functionality of the k3d CLI which cannot be tested using Go unit tests
|
||||
- [`tools/`](https://github.com/rancher/k3d/tree/main/tools)
|
||||
- sub-module used to build the [`rancher/k3d-tools`](https://hub.docker.com/r/rancher/k3d-tools) container image which supports some k3d functionality like `k3d image import`
|
||||
- [`vendor/`](https://github.com/rancher/k3d/tree/main/vendor)
|
||||
- result of `go mod vendor`, which contains all dependencies of k3d
|
||||
- [`version/`](https://github.com/rancher/k3d/tree/main/version)
|
||||
- package used to code k3d/k3s versions into releases
|
||||
- this is where `go build` injects the version tags when building k3d
|
||||
- that's the output you see when issuing `k3d version`
|
||||
|
||||
## Packages Overview
|
||||
|
||||
- [`pkg/`](https://github.com/rancher/k3d/tree/main/pkg)
|
||||
- [`actions/`](https://github.com/rancher/k3d/tree/main/pkg/actions)
|
||||
- hook actions describing actions (commands, etc.) that run at specific stages of the node/cluster lifecycle
|
||||
- e.g. writing configuration files to the container filesystem just before the node (container) starts
|
||||
- [`client/`](https://github.com/rancher/k3d/tree/main/pkg/client)
|
||||
- all the top level functionality to work with k3d primitives
|
||||
- create/retrieve/update/delete/start/stop clusters, nodes, registries, etc. managed by k3d
|
||||
- [`config/`](https://github.com/rancher/k3d/tree/main/pkg/config)
|
||||
- everything related to the k3d configuration (files), like `SimpleConfig` and `ClusterConfig`
|
||||
- [`runtimes/`](https://github.com/rancher/k3d/tree/main/pkg/runtimes)
|
||||
- interface and implementations of runtimes that power k3d (currently, that's only Docker)
|
||||
- functions in [`client/`](https://github.com/rancher/k3d/tree/main/pkg/client) eventually call runtime functions to "materialize" nodes and clusters
|
||||
- [`tools/`](https://github.com/rancher/k3d/tree/main/pkg/tools)
|
||||
- functions eventually calling the [`k3d-tools`](https://hub.docker.com/r/rancher/k3d-tools) container (see [`tools/`](https://github.com/rancher/k3d/tree/main/tools) in the repo root)
|
||||
- [`types/`](https://github.com/rancher/k3d/tree/main/pkg/types)
|
||||
- definition of all k3d primitives and many other details and defaults
|
||||
- e.g. contains the definition of a `Node` or a `Cluster` in k3d
|
||||
- [`util/`](https://github.com/rancher/k3d/tree/main/pkg/util)
|
||||
- some helper functions e.g. for string manipulation/generation, regexp or other re-usable usages
|
||||
|
||||
## Anatomy of a Cluster
|
||||
|
||||
By default, every k3d cluster consists of at least 2 containers (nodes):
|
||||
|
||||
1. (optional, but default and strongly recommended) loadbalancer
|
||||
|
||||
- image: [`rancher/k3d-proxy`](https://hub.docker.com/r/rancher/k3d-proxy/), built from [`proxy/`](https://github.com/rancher/k3d/tree/main/proxy)
|
||||
- purpose: proxy and load balance requests from the outside (i.e. most of the times your local host) to the cluster
|
||||
- by default, it e.g. proxies all the traffic for the Kubernetes API to port `6443` (default listening port of K3s) to all the server nodes in the cluster
|
||||
- can be used for multiple port-mappings to one or more nodes in your cluster
|
||||
- that way, port-mappings can also easily be added/removed after the cluster creation, as we can simply re-create the proxy without affecting cluster state
|
||||
|
||||
2. (required, always present) primary server node
|
||||
|
||||
- image: [`rancher/k3s`](https://hub.docker.com/r/rancher/k3s/), built from [`github.com/k3s-io/k3s`](https://github.com/k3s-io/k3s)
|
||||
- purpose: (initializing) server (formerly: master) node of the cluster
|
||||
- runs the K3s executable (which runs containerd, the Kubernetes API Server, etcd/sqlite, etc.): `k3s server`
|
||||
- in a multi-server setup, it initializes the cluster with an embedded etcd database (using the K3s `--cluster-init` flag)
|
||||
|
||||
3. (optional) secondary server node(s)
|
||||
|
||||
- image: [`rancher/k3s`](https://hub.docker.com/r/rancher/k3s/), built from [`github.com/k3s-io/k3s`](https://github.com/k3s-io/k3s)
|
||||
|
||||
4. (optional) agent node(s)
|
||||
|
||||
- image: [`rancher/k3s`](https://hub.docker.com/r/rancher/k3s/), built from [`github.com/k3s-io/k3s`](https://github.com/k3s-io/k3s)
|
||||
- purpose: running the K3s agent process (kubelet, etc.): `k3s agent`
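A quick way to see this anatomy on your own machine (cluster name and agent count are arbitrary; `--agents` is assumed to be the agent-count flag of your k3d version):

```bash
# one loadbalancer (k3d-proxy), one server and two agent containers
k3d cluster create demo --agents 2
docker ps | grep demo
```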
|
||||
|
||||
## Automation (CI)
|
||||
|
||||
The k3d repository mainly leverages the following two CI systems:
|
||||
|
||||
- GitHub Actions
|
||||
- 2 workflows in <https://github.com/rancher/k3d/tree/main/.github/workflows> to push the artifact to AUR (Arch Linux User Repository)
|
||||
- logs/history can be seen in the Actions tab: <https://github.com/rancher/k3d/actions>
|
||||
- DroneCI
|
||||
- a set of pipelines in a single file: <https://github.com/rancher/k3d/blob/main/.drone.yml>
|
||||
- static code analysis
|
||||
- build
|
||||
- tests
|
||||
- docker builds + pushes
|
||||
- render + push docs
|
||||
- (pre-) release to GitHub
|
||||
- `push` events end up here (also does the releases, when a tag is pushed): <https://drone-publish.rancher.io/rancher/k3d>
|
||||
- `pr`s end up here: <https://drone-pr.rancher.io/rancher/k3d>
|
||||
|
||||
## Documentation
|
||||
|
||||
The website [k3d.io](https://k3d.io) containing all the documentation for k3d is built using [`mkdocs`](https://www.mkdocs.org/), configured via the [`mkdocs.yml`](https://github.com/rancher/k3d/blob/main/mkdocs.yml) config file with all the content residing in the [`docs/`](https://github.com/rancher/k3d/tree/main/docs) directory (Markdown).
|
||||
Use `mkdocs serve` in the repository root to build and serve the webpage locally.
|
||||
Some parts of the documentation are being auto-generated, like [`docs/usage/commands/`](https://github.com/rancher/k3d/tree/main/docs/usage/commands) is auto-generated using Cobra's command docs generation functionality in [`docgen/`](https://github.com/rancher/k3d/tree/main/docgen).
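In practice that boils down to (assuming `mkdocs` and the plugins pinned in the docs requirements file are installed locally):

```bash
# from the repository root: build the docs and serve them with live reload
mkdocs serve
```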
|
@ -1,4 +1,4 @@
|
||||
title: FAQ
|
||||
nav:
|
||||
arrange:
|
||||
- faq.md
|
||||
collapse: true
|
||||
- v1vsv3-comparison.md
|
126 docs/faq/faq.md
@ -1,13 +1,13 @@
|
||||
# FAQ
|
||||
# FAQ / Nice to know
|
||||
|
||||
## Issues with BTRFS
|
||||
|
||||
- As [@jaredallard](https://github.com/jaredallard) [pointed out](https://github.com/rancher/k3d/pull/48), people running `k3d` on a system with **btrfs**, may need to mount `/dev/mapper` into the nodes for the setup to work.
|
||||
- This will do: `#!bash k3d cluster create CLUSTER_NAME -v /dev/mapper:/dev/mapper`
|
||||
- This will do: `k3d cluster create CLUSTER_NAME -v /dev/mapper:/dev/mapper`
|
||||
|
||||
## Issues with ZFS
|
||||
|
||||
- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-server setups (e.g. `#!bash k3d cluster create multiserver --servers 3`) fails, because the initializing server node (server flag `--cluster-init`) errors out with the following log:
|
||||
- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-server setups (e.g. `k3d cluster create multiserver --servers 3`) fails, because the initializing server node (server flag `--cluster-init`) errors out with the following log:
|
||||
|
||||
```bash
|
||||
starting kubernetes: preparing server: start cluster and https: raft_init(): io: create I/O capabilities probe file: posix_allocate: operation not supported on socket
|
||||
@ -24,13 +24,7 @@
|
||||
- Possible [fix/workaround by @zer0def](https://github.com/rancher/k3d/issues/133#issuecomment-549065666):
|
||||
- use a docker storage driver which cleans up properly (e.g. overlay2)
|
||||
- clean up or expand docker root filesystem
|
||||
- change the kubelet's eviction thresholds upon cluster creation:
|
||||
|
||||
```bash
|
||||
k3d cluster create \
|
||||
--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
|
||||
--k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*'
|
||||
```
|
||||
- change the kubelet's eviction thresholds upon cluster creation: `k3d cluster create --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`
|
||||
|
||||
## Restarting a multi-server cluster or the initializing server node fails
|
||||
|
||||
@ -44,118 +38,14 @@
|
||||
|
||||
- The Problem: Passing a feature flag to the Kubernetes API Server running inside k3s.
|
||||
- Example: you want to enable the EphemeralContainers feature flag in Kubernetes
|
||||
- Solution: `#!bash k3d cluster create --k3s-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=true@server:*'`
|
||||
- **Note**: Be aware of where the flags require dashes (`--`) and where not.
|
||||
- Solution: `#!bash k3d cluster create --k3s-server-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=true'`
|
||||
- Note: Be aware of where the flags require dashes (`--`) and where not.
|
||||
- the k3s flag (`--kube-apiserver-arg`) has the dashes
|
||||
- the kube-apiserver flag `feature-gates` doesn't have them (k3s adds them internally)
|
||||
|
||||
- Second example:
|
||||
|
||||
```bash
|
||||
k3d cluster create k3d-one \
|
||||
--k3s-arg "--cluster-cidr=10.118.0.0/17@server:*" \
|
||||
--k3s-arg "--service-cidr=10.118.128.0/17@server:*" \
|
||||
--k3s-arg "--disable=servicelb@server:*" \
|
||||
--k3s-arg "--disable=traefik@server:*" \
|
||||
--verbose
|
||||
```
|
||||
|
||||
- **Note**: There are many ways to use the `"` and `'` quotes, just be aware, that sometimes shells also try to interpret/interpolate parts of the commands
|
||||
- Second example: `#!bash k3d cluster create k3d-one --k3s-server-arg --cluster-cidr="10.118.0.0/17" --k3s-server-arg --service-cidr="10.118.128.0/17" --k3s-server-arg --disable=servicelb --k3s-server-arg --disable=traefik --verbose`
|
||||
- Note: There are many ways to use the `"` and `'` quotes, just be aware, that sometimes shells also try to interpret/interpolate parts of the commands
|
||||
|
||||
## How to access services (like a database) running on my Docker Host Machine
|
||||
|
||||
- As of version v3.1.0, we're injecting the `host.k3d.internal` entry into the k3d containers (k3s nodes) and into the CoreDNS ConfigMap, enabling you to access your host system by referring to it as `host.k3d.internal`
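For example, you can verify the entry from inside the cluster with a throwaway pod (pod name and image are arbitrary):

```bash
# resolve the injected host entry from within the cluster
kubectl run -it --rm dns-test --image=busybox --restart=Never -- nslookup host.k3d.internal
```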
|
||||
|
||||
## Running behind a corporate proxy
|
||||
|
||||
Running k3d behind a corporate proxy can lead to some issues with k3d that have already been reported in more than one issue.
|
||||
Some can be fixed by passing the `HTTP_PROXY` environment variables to k3d, some have to be fixed in docker's `daemon.json` file and some are as easy as adding a volume mount.
|
||||
|
||||
## Pods fail to start: `x509: certificate signed by unknown authority`
|
||||
|
||||
- Example Error Message:
|
||||
|
||||
```bash
|
||||
Failed to create pod sandbox: rpc error: code = Unknown desc = failed to get sandbox image "docker.io/rancher/pause:3.1": failed to pull image "docker.io/rancher/pause:3.1": failed to pull and unpack image "docker.io/rancher/pause:3.1": failed to resolve reference "docker.io/rancher/pause:3.1": failed to do request: Head https://registry-1.docker.io/v2/rancher/pause/manifests/3.1: x509: certificate signed by unknown authority
|
||||
```
|
||||
|
||||
- Problem: inside the container, the certificate of the corporate proxy cannot be validated
|
||||
- Possible Solution: Mounting the CA Certificate from your host into the node containers at start time via `k3d cluster create --volume /path/to/your/certs.crt:/etc/ssl/certs/yourcert.crt`
|
||||
- Issue: [rancher/k3d#535](https://github.com/rancher/k3d/discussions/535#discussioncomment-474982)
|
||||
|
||||
## Spurious PID entries in `/proc` after deleting `k3d` cluster with shared mounts
|
||||
|
||||
- When you create and delete clusters multiple times with the **same cluster name** and **shared volume mounts**, it was observed that `grep k3d /proc/*/mountinfo` shows many spurious entries
- Problem: as a result, you may at times see `no space left on device: unknown` when a pod is scheduled to the nodes
- If you observe anything of the above sort, you can check for inaccessible file systems and unmount them using the command below (note: remove the `xargs umount -l` part first and review the diff output before actually unmounting anything)
- `diff <(df -ha | grep pods | awk '{print $NF}') <(df -h | grep pods | awk '{print $NF}') | awk '{print $2}' | xargs umount -l`
- As per the conversation on [rancher/k3d#594](https://github.com/rancher/k3d/issues/594#issuecomment-837900646), this issue wasn't reported/known earlier, so chances are high that it's not universal.
|
||||
|
||||
## [SOLVED] Nodes fail to start or get stuck in `NotReady` state with log `nf_conntrack_max: permission denied`
|
||||
|
||||
### Problem
|
||||
|
||||
- When: This happens when running k3d on a Linux system with a kernel version >= 5.12.2 (and others like >= 5.11.19) when creating a new cluster
|
||||
- the node(s) stop or get stuck with a log line like this: `<TIMESTAMP> F0516 05:05:31.782902 7 server.go:495] open /proc/sys/net/netfilter/nf_conntrack_max: permission denied`
|
||||
- Why: The issue was introduced by a change in the Linux kernel ([Changelog 5.12.2](https://cdn.kernel.org/pub/linux/kernel/v5.x/ChangeLog-5.12.2): [Commit](https://github.com/torvalds/linux/commit/671c54ea8c7ff47bd88444f3fffb65bf9799ce43)), that changed the netfilter_conntrack behavior in a way that `kube-proxy` is not able to set the `nf_conntrack_max` value anymore
|
||||
|
||||
### Workaround
|
||||
|
||||
- Workaround: as a workaround, we can tell `kube-proxy` to not even try to set this value:
|
||||
|
||||
```bash
|
||||
k3d cluster create \
|
||||
--k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@server:*" \
|
||||
--k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@agent:*" \
|
||||
--image rancher/k3s:v1.20.6-k3s
|
||||
```
|
||||
|
||||
### Fix
|
||||
|
||||
- **Note**: k3d v4.4.5 already uses rancher/k3s:v1.21.1-k3s1 as the new default k3s image, so no workarounds needed there!
|
||||
|
||||
This is going to be fixed "upstream" in k3s itself in [rancher/k3s#3337](https://github.com/k3s-io/k3s/pull/3337) and backported to k3s versions as low as v1.18.
|
||||
|
||||
- **The fix was released and backported in k3s, so you don't need to use the workaround when using one of the following k3s versions (or later ones)**
|
||||
- v1.18.19-k3s1 ([rancher/k3s#3344](https://github.com/k3s-io/k3s/pull/3344))
|
||||
- v1.19.11-k3s1 ([rancher/k3s#3343](https://github.com/k3s-io/k3s/pull/3343))
|
||||
- v1.20.7-k3s1 ([rancher/k3s#3342](https://github.com/k3s-io/k3s/pull/3342))
|
||||
- v1.21.1-k3s1 ([rancher/k3s#3341](https://github.com/k3s-io/k3s/pull/3341))
- Issue Reference: [rancher/k3d#607](https://github.com/rancher/k3d/issues/607)
|
||||
|
||||
## DockerHub Pull Rate Limit
|
||||
|
||||
### Problem
|
||||
|
||||
You're deploying something to the cluster using an image from DockerHub and the image fails to be pulled, with a `429` response code and a message saying `You have reached your pull rate limit. You may increase the limit by authenticating and upgrading`.
|
||||
|
||||
### Cause
|
||||
|
||||
This is caused by DockerHub's pull rate limit (see <https://docs.docker.com/docker-hub/download-rate-limit/>), which limits pulls from unauthenticated/anonymous users to 100 pulls per hour and for authenticated users (not paying customers) to 200 pulls per hour (as of the time of writing).
|
||||
|
||||
### Solution
|
||||
|
||||
a) use images from a private registry, e.g. configured as a pull-through cache for DockerHub
|
||||
b) use a different public registry without such limitations, if the same image is stored there
|
||||
c) authenticate containerd inside k3s/k3d to use your DockerHub user
|
||||
|
||||
#### (c) Authenticate Containerd against DockerHub
|
||||
|
||||
1. Create a registry configuration file for containerd:
|
||||
|
||||
```yaml
|
||||
# saved as e.g. $HOME/registries.yaml
|
||||
configs:
|
||||
"docker.io":
|
||||
auth:
|
||||
username: "$USERNAME"
|
||||
password: "$PASSWORD"
|
||||
```
|
||||
|
||||
2. Create a k3d cluster using that config:
|
||||
|
||||
```bash
|
||||
k3d cluster create --registry-config $HOME/registries.yaml
|
||||
```
|
||||
|
||||
3. Profit. That's it. In the test for this, we pulled the same image 120 times in a row (confirmed that the pull numbers went up), without being rate limited (as a non-paying, normal user).
|
||||
|
63 docs/faq/v1vsv3-comparison.md (new file)
@ -0,0 +1,63 @@
|
||||
# Feature Comparison: v1 vs. v3
|
||||
|
||||
## v1.x feature -> implementation in v3
|
||||
|
||||
```text
|
||||
- k3d
|
||||
- check-tools -> won't do
|
||||
- shell -> planned: `k3d shell CLUSTER`
|
||||
- --name -> planned: drop (now as arg)
|
||||
- --command -> planned: keep
|
||||
- --shell -> planned: keep (or second arg)
|
||||
- auto, bash, zsh
|
||||
- create -> `k3d cluster create CLUSTERNAME`
|
||||
- --name -> dropped, implemented via arg
|
||||
- --volume -> implemented
|
||||
- --port -> implemented
|
||||
- --port-auto-offset -> TBD
|
||||
- --api-port -> implemented
|
||||
- --wait -> implemented
|
||||
- --image -> implemented
|
||||
- --server-arg -> implemented as `--k3s-server-arg`
|
||||
- --agent-arg -> implemented as `--k3s-agent-arg`
|
||||
- --env -> planned
|
||||
- --label -> planned
|
||||
- --workers -> implemented
|
||||
- --auto-restart -> dropped (docker's `unless-stopped` is set by default)
|
||||
- --enable-registry -> planned (possible consolidation into less registry-related commands?)
|
||||
- --registry-name -> TBD
|
||||
- --registry-port -> TBD
|
||||
- --registry-volume -> TBD
|
||||
- --registries-file -> TBD
|
||||
- --enable-registry-cache -> TBD
|
||||
- (add-node) -> `k3d node create NODENAME`
|
||||
- --role -> implemented
|
||||
- --name -> dropped, implemented as arg
|
||||
- --count -> implemented as `--replicas`
|
||||
- --image -> implemented
|
||||
- --arg -> planned
|
||||
- --env -> planned
|
||||
- --volume -> planned
|
||||
- --k3s -> TBD
|
||||
- --k3s-secret -> TBD
|
||||
- --k3s-token -> TBD
|
||||
- delete -> `k3d cluster delete CLUSTERNAME`
|
||||
- --name -> dropped, implemented as arg
|
||||
- --all -> implemented
|
||||
- --prune -> TBD
|
||||
- --keep-registry-volume -> TBD
|
||||
- stop -> `k3d cluster stop CLUSTERNAME`
|
||||
- --name -> dropped, implemented as arg
|
||||
- --all -> implemented
|
||||
- start -> `k3d cluster start CLUSTERNAME`
|
||||
- --name -> dropped, implemented as arg
|
||||
- --all -> implemented
|
||||
- list -> dropped, implemented as `k3d get clusters`
|
||||
- get-kubeconfig -> `k3d kubeconfig get|merge CLUSTERNAME`
|
||||
- --name -> dropped, implemented as arg
|
||||
- --all -> implemented
|
||||
- --overwrite -> implemented
|
||||
- import-images -> `k3d image import [--cluster CLUSTERNAME] [--keep] IMAGES`
|
||||
- --name -> implemented as `--cluster`
|
||||
- --no-remove -> implemented as `--keep-tarball`
|
||||
```
|
@ -2,33 +2,23 @@
|
||||
|
||||

|
||||
|
||||
## What is k3d?
|
||||
**This page is targeting k3d v3.0.0 and newer!**
|
||||
|
||||
k3d is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s) (Rancher Lab's minimal Kubernetes distribution) in docker.
|
||||
|
||||
k3d makes it very easy to create single- and multi-node [k3s](https://github.com/rancher/k3s) clusters in docker, e.g. for local development on Kubernetes.
|
||||
|
||||
**Note:** k3d is a **community-driven project** that is supported by Rancher (SUSE), but it's not an official Rancher (SUSE) project.
|
||||
|
||||
??? Tip "View a quick demo"
|
||||
<asciinema-player src="/static/asciicast/20210917_k3d_v5.0.0_01.cast" cols=200 rows=32></asciinema-player>
|
||||
<asciinema-player src="/static/asciicast/20200715_k3d.01.cast" cols=200 rows=32></asciinema-player>
|
||||
|
||||
## Learning
|
||||
|
||||
!!! Tip "k3d demo repository: [iwilltry42/k3d-demo](https://github.com/iwilltry42/k3d-demo)"
|
||||
Featured use-cases include:
|
||||
|
||||
- **hot-reloading** of code when developing on k3d (Python Flask App)
|
||||
- build-deploy-test cycle using **Tilt**
|
||||
- full cluster lifecycle for simple and **multi-server** clusters
|
||||
- Proof of Concept of using k3d as a service in **Drone CI**
|
||||
|
||||
- [Rancher Meetup - May 2020 - Simplifying Your Cloud-Native Development Workflow With K3s, K3c and K3d (YouTube)](https://www.youtube.com/watch?v=hMr3prm9gDM)
|
||||
- k3d demo repository: [iwilltry42/k3d-demo](https://github.com/iwilltry42/k3d-demo)
|
||||
|
||||
## Requirements
|
||||
|
||||
- [**docker**](https://docs.docker.com/install/) to be able to use k3d at all
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the Kubernetes cluster
|
||||
- [docker](https://docs.docker.com/install/)
|
||||
|
||||
## Releases
|
||||
|
||||
@ -43,48 +33,22 @@ k3d makes it very easy to create single- and multi-node [k3s](https://github.com
|
||||
|
||||
You have several options there:
|
||||
|
||||
### [:fontawesome-regular-file-code: Install Script](https://raw.githubusercontent.com/rancher/k3d/main/install.sh)
|
||||
- use the install script to grab the latest release:
|
||||
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
- use the install script to grab a specific release (via `TAG` environment variable):
|
||||
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
|
||||
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
|
||||
|
||||
#### Install current latest release
|
||||
|
||||
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
|
||||
#### Install specific release
|
||||
|
||||
Use the install script to grab a specific release (via `TAG` environment variable):
|
||||
|
||||
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
|
||||
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
|
||||
|
||||
### Other Installers
|
||||
|
||||
??? Tip "Other Installation Methods"
|
||||
|
||||
- [:fontawesome-solid-beer: Homebrew (MacOS/Linux)](https://brew.sh): `#!bash brew install k3d`
|
||||
|
||||
*Note*: The formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
|
||||
|
||||
- [:material-arch: AUR (Arch Linux User Repository)](https://aur.archlinux.org/): `#!bash yay -S rancher-k3d-bin`
|
||||
|
||||
Package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/)
|
||||
|
||||
- [:material-github: Download GitHub Release](https://github.com/rancher/k3d/releases)
|
||||
|
||||
Grab a release binary from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself
|
||||
|
||||
- [:material-microsoft-windows: Chocolatey (Windows)](https://chocolatey.org/): `choco install k3d`
|
||||
|
||||
*Note*: package source can be found in [erwinkersten/chocolatey-packages](https://github.com/erwinkersten/chocolatey-packages/tree/master/automatic/k3d)
|
||||
|
||||
- [arkade](https://github.com/alexellis/arkade): `arkade get k3d`
|
||||
|
||||
- [asdf](https://asdf-vm.com): `asdf plugin-add k3d && asdf install k3d latest`
|
||||
|
||||
*Note*: `asdf plugin-add k3d`, then `asdf install k3d <tag>` with `<tag> = latest` or `5.x.x` for a specific version (maintained by [spencergilbert/asdf-k3d](https://github.com/spencergilbert/asdf-k3d))
|
||||
|
||||
- Others
|
||||
- install via go: `#!bash go install github.com/rancher/k3d@latest` (**Note**: this will give you unreleased/bleeding-edge changes)
|
||||
- use [Homebrew](https://brew.sh): `#!bash brew install k3d` (Homebrew is available for MacOS and Linux)
|
||||
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
|
||||
- install via [AUR](https://aur.archlinux.org/) package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/): `yay -S rancher-k3d-bin`
|
||||
- grab a release from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself.
|
||||
- install via go: `#!bash go install github.com/rancher/k3d` (**Note**: this will give you unreleased/bleeding-edge changes)
|
||||
- use [arkade](https://github.com/alexellis/arkade): `arkade get k3d`
|
||||
- use [asdf](https://asdf-vm.com): `asdf plugin-add k3d`, then `asdf install k3d <tag>` with `<tag> = latest` or `3.x.x` for a specific version (maintained by [spencergilbert/asdf-k3d](https://github.com/spencergilbert/asdf-k3d))
|
||||
- use [Chocolatey](https://chocolatey.org/): `choco install k3d` (Chocolatey package manager is available for Windows)
|
||||
- package source can be found in [erwinkersten/chocolatey-packages](https://github.com/erwinkersten/chocolatey-packages/tree/master/automatic/k3d)
|
||||
|
||||
## Quick Start
|
||||
|
||||
@ -94,23 +58,18 @@ Create a cluster named `mycluster` with just a single server node:
|
||||
k3d cluster create mycluster
|
||||
```
|
||||
|
||||
Get the new cluster's connection details merged into your default kubeconfig (usually specified using the `KUBECONFIG` environment variable or the default path `#!bash $HOME/.kube/config`) and directly switch to the new context:
|
||||
|
||||
```bash
|
||||
k3d kubeconfig merge mycluster --switch-context
|
||||
```
|
||||
|
||||
Use the new cluster with [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), e.g.:
|
||||
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
??? Note "Getting the cluster's kubeconfig (included in `k3d cluster create`)"
|
||||
Get the new cluster's connection details merged into your default kubeconfig (usually specified using the `KUBECONFIG` environment variable or the default path `#!bash $HOME/.kube/config`) and directly switch to the new context:
|
||||
|
||||
```bash
|
||||
k3d kubeconfig merge mycluster --kubeconfig-switch-context
|
||||
```
|
||||
|
||||
## Related Projects
|
||||
|
||||
- [vscode-k3d](https://github.com/inercia/vscode-k3d/): VSCode Extension to handle k3d clusters from within VSCode
|
||||
- [k3x](https://github.com/inercia/k3x): a graphics interface (for Linux) to k3d.
|
||||
- [AbsaOSS/k3d-action](https://github.com/AbsaOSS/k3d-action): fully customizable GitHub Action to run lightweight Kubernetes clusters.
|
||||
- [AutoK3s](https://github.com/cnrancher/autok3s): a lightweight tool to help run K3s everywhere including k3d provider.
|
||||
- [nolar/setup-k3d-k3s](https://github.com/nolar/setup-k3d-k3s): setup K3d/K3s for GitHub Actions.
|
||||
|
4 docs/internals/.pages (new file)
@ -0,0 +1,4 @@
|
||||
title: Internals
|
||||
arrange:
|
||||
- defaults.md
|
||||
- networking.md
|
12 docs/internals/defaults.md (new file)
@ -0,0 +1,12 @@
|
||||
# Defaults
|
||||
|
||||
- multiple server nodes
|
||||
- by default, when `--servers` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node (see the example after this list)
|
||||
- the initializing server node will have the `--cluster-init` flag appended
|
||||
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
|
||||
- API-Ports
|
||||
- by default, we don't expose any API-Port (no host port mapping)
|
||||
- kubeconfig
|
||||
- if `--[update|merge]-default-kubeconfig` is set, we use the default loading rules to get the default kubeconfig:
|
||||
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
|
||||
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
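A minimal sketch of how these defaults play out in practice (the cluster name `multiserver` and the host port `6550` are arbitrary examples):

```bash
# server-0 gets `--cluster-init`, server-1 and server-2 join it;
# without --api-port, no host port would be mapped for the Kubernetes API
k3d cluster create multiserver --servers 3 --api-port 127.0.0.1:6550
```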
|
28
docs/internals/networking.md
Normal file
28
docs/internals/networking.md
Normal file
@ -0,0 +1,28 @@
|
||||
# Networking
|
||||
|
||||
- Related issues:
|
||||
- [rancher/k3d #220](https://github.com/rancher/k3d/issues/220)
|
||||
|
||||
## Introduction
|
||||
|
||||
By default, k3d creates a new (docker) network for every new cluster.
|
||||
Use the `--network STRING` flag upon creation to connect to an existing network.
|
||||
Existing networks won't be managed by k3d together with the cluster lifecycle.
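For example, to attach a cluster to a pre-existing network (`my-existing-net` is a hypothetical name):

```bash
# created outside of k3d, so k3d won't delete it together with the cluster
docker network create my-existing-net
k3d cluster create mycluster --network my-existing-net
```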
|
||||
|
||||
## Connecting to docker "internal"/pre-defined networks
|
||||
|
||||
### `host` network
|
||||
|
||||
When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`),
|
||||
you won't be able to create more than **one server node**.
|
||||
An edge case would be one server node (with agent disabled) and one agent node.
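A minimal sketch of the host-network case described above, limited to a single server node (the cluster name `hostnet` is arbitrary):

```bash
k3d cluster create hostnet --network host --servers 1
```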
|
||||
|
||||
### `bridge` network
|
||||
|
||||
By default, every network that k3d creates works in `bridge` mode.
|
||||
But when you try to use `--network bridge` to connect to docker's internal `bridge` network, you may
|
||||
run into issues with grabbing certificates from the API-Server. Single-Node clusters should work though.
|
||||
|
||||
### `none` "network"
|
||||
|
||||
Well.. this doesn't really make sense for k3d anyway ¯\_(ツ)_/¯
|
@ -1,8 +1,5 @@
|
||||
mkdocs==1.2.2
|
||||
mkdocs-material==7.2.6
|
||||
pymdown-extensions==8.2
|
||||
mkdocs-git-revision-date-localized-plugin==0.9.3
|
||||
mkdocs-awesome-pages-plugin==2.5.0
|
||||
mdx_truly_sane_lists==1.2 # https://github.com/radude/mdx_truly_sane_lists
|
||||
mkdocs-include-markdown-plugin==3.2.2 # https://github.com/mondeja/mkdocs-include-markdown-plugin
|
||||
mike==1.1.0 # versioned docs: https://github.com/jimporter/mike
|
||||
mkdocs
|
||||
mkdocs-material
|
||||
pymdown-extensions
|
||||
mkdocs-git-revision-date-localized-plugin
|
||||
mkdocs-awesome-pages-plugin
|
162
docs/static/asciicast/20210917_k3d_v5.0.0_01.cast
vendored
162
docs/static/asciicast/20210917_k3d_v5.0.0_01.cast
vendored
@ -1,162 +0,0 @@
|
||||
{"version": 2, "width": 213, "height": 45, "timestamp": 1631908903, "env": {"SHELL": "bash", "TERM": "xterm-256color"}}
|
||||
[0.018381, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[1.636481, "o", "k"]
|
||||
[1.702291, "o", "3"]
|
||||
[1.835268, "o", "d"]
|
||||
[2.024007, "o", " "]
|
||||
[2.111734, "o", "v"]
|
||||
[2.210891, "o", "e"]
|
||||
[2.343441, "o", "r"]
|
||||
[2.516933, "o", "s"]
|
||||
[2.583471, "o", "i"]
|
||||
[2.773563, "o", "o"]
|
||||
[2.927568, "o", "n"]
|
||||
[3.159219, "o", "\r\n\u001b[?2004l\r"]
|
||||
[3.179508, "o", "k3d version v5.0.0\r\nk3s version v1.21.4-k3s1 (default)\r\n"]
|
||||
[3.180754, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[4.57973, "o", "k"]
|
||||
[4.656235, "o", "3"]
|
||||
[4.763252, "o", "d"]
|
||||
[4.865396, "o", " "]
|
||||
[4.986278, "o", "c"]
|
||||
[5.051494, "o", "l"]
|
||||
[5.238737, "o", "u"]
|
||||
[5.292747, "o", "s"]
|
||||
[5.381595, "o", "t"]
|
||||
[5.503508, "o", "e"]
|
||||
[5.578881, "o", "r"]
|
||||
[5.666704, "o", " "]
|
||||
[5.766742, "o", "c"]
|
||||
[5.962787, "o", "r"]
|
||||
[6.029469, "o", "e"]
|
||||
[6.061464, "o", "a"]
|
||||
[6.184275, "o", "t"]
|
||||
[6.281805, "o", "e"]
|
||||
[6.445508, "o", " "]
|
||||
[6.666863, "o", "-"]
|
||||
[7.20248, "o", "-"]
|
||||
[7.334019, "o", "a"]
|
||||
[7.490134, "o", "g"]
|
||||
[7.566087, "o", "e"]
|
||||
[7.631634, "o", "n"]
|
||||
[7.729597, "o", "t"]
|
||||
[7.897099, "o", "s"]
|
||||
[8.049496, "o", " "]
|
||||
[8.280178, "o", "3"]
|
||||
[8.499599, "o", " "]
|
||||
[8.631147, "o", "d"]
|
||||
[8.707104, "o", "e"]
|
||||
[8.773508, "o", "m"]
|
||||
[8.91407, "o", "o"]
|
||||
[9.113612, "o", "\r\n\u001b[?2004l\r"]
|
||||
[9.132118, "o", "\u001b[36mINFO\u001b[0m[0000] Prep: Network \r\n"]
|
||||
[9.183203, "o", "\u001b[36mINFO\u001b[0m[0000] Created network 'k3d-demo' \r\n"]
|
||||
[9.187229, "o", "\u001b[36mINFO\u001b[0m[0000] Created volume 'k3d-demo-images' \r\n"]
|
||||
[10.187972, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-server-0' \r\n"]
|
||||
[10.281058, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-0' \r\n"]
|
||||
[10.368708, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-1' \r\n"]
|
||||
[10.455282, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-2' \r\n"]
|
||||
[10.536337, "o", "\u001b[36mINFO\u001b[0m[0001] Creating LoadBalancer 'k3d-demo-serverlb' \r\n"]
|
||||
[10.609539, "o", "\u001b[36mINFO\u001b[0m[0001] Using the k3d-tools node to gather environment information \r\n"]
|
||||
[10.628592, "o", "\u001b[36mINFO\u001b[0m[0001] Starting new tools node... \r\n"]
|
||||
[10.702678, "o", "\u001b[36mINFO\u001b[0m[0001] Starting Node 'k3d-demo-tools' \r\n"]
|
||||
[11.394216, "o", "\u001b[36mINFO\u001b[0m[0002] Deleted k3d-demo-tools \r\n"]
|
||||
[11.394427, "o", "\u001b[36mINFO\u001b[0m[0002] Starting cluster 'demo' \r\n\u001b[36mINFO\u001b[0m[0002] Starting servers... \r\n"]
|
||||
[11.404635, "o", "\u001b[36mINFO\u001b[0m[0002] Starting Node 'k3d-demo-server-0' \r\n"]
|
||||
[16.378372, "o", "\u001b[36mINFO\u001b[0m[0007] Starting agents... \r\n"]
|
||||
[16.388922, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-0' \r\n"]
|
||||
[16.389848, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-1' \r\n"]
|
||||
[16.397254, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-2' \r\n"]
|
||||
[31.590126, "o", "\u001b[36mINFO\u001b[0m[0022] Starting helpers... \r\n"]
|
||||
[31.637947, "o", "\u001b[36mINFO\u001b[0m[0022] Starting Node 'k3d-demo-serverlb' \r\n"]
|
||||
[38.185432, "o", "\u001b[36mINFO\u001b[0m[0029] Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access \r\n"]
|
||||
[50.256861, "o", "\u001b[36mINFO\u001b[0m[0041] Cluster 'demo' created successfully! \r\n\u001b[36mINFO\u001b[0m[0041] --kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false \r\n"]
|
||||
[50.295453, "o", "\u001b[36mINFO\u001b[0m[0041] You can now use it like this: \r\nkubectl config use-context k3d-demo\r\nkubectl cluster-info\r\n"]
|
||||
[50.299281, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[52.777117, "o", "k"]
|
||||
[52.873341, "o", "3"]
|
||||
[53.006105, "o", "d"]
|
||||
[53.147707, "o", " "]
|
||||
[53.245736, "o", "c"]
|
||||
[53.343772, "o", "l"]
|
||||
[53.551038, "o", "u"]
|
||||
[53.617941, "o", "s"]
|
||||
[53.724853, "o", "t"]
|
||||
[53.878933, "o", "e"]
|
||||
[53.956281, "o", "r"]
|
||||
[54.076303, "o", " "]
|
||||
[54.21845, "o", "l"]
|
||||
[54.339561, "o", "s"]
|
||||
[54.447647, "o", "\r\n\u001b[?2004l\r"]
|
||||
[54.47118, "o", "NAME SERVERS AGENTS LOADBALANCER\r\ndemo 1/1 3/3 true\r\n"]
|
||||
[54.472506, "o", "\u001b[?2004h"]
|
||||
[54.472562, "o", "\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[54.838629, "o", "k"]
|
||||
[54.918551, "o", "3"]
|
||||
[55.015846, "o", "d"]
|
||||
[55.115834, "o", " "]
|
||||
[55.290514, "o", "n"]
|
||||
[55.378089, "o", "o"]
|
||||
[55.454292, "o", "d"]
|
||||
[55.508669, "o", "e"]
|
||||
[55.869687, "o", " "]
|
||||
[56.05605, "o", "l"]
|
||||
[56.176004, "o", "s"]
|
||||
[56.31685, "o", "\r\n\u001b[?2004l\r"]
|
||||
[56.341161, "o", "NAME ROLE CLUSTER STATUS\r\nk3d-demo-agent-0 agent demo running\r\nk3d-demo-agent-1 agent demo running\r\nk3d-demo-agent-2 agent demo running\r\nk3d-demo-server-0 server demo running\r\nk3d-demo-serverlb loadbalancer demo running\r\n"]
|
||||
[56.34231, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[57.733293, "o", "k"]
|
||||
[57.932149, "o", "u"]
|
||||
[58.059135, "o", "b"]
|
||||
[58.137901, "o", "e"]
|
||||
[58.23908, "o", "c"]
|
||||
[58.418996, "o", "t"]
|
||||
[58.496899, "o", "l"]
|
||||
[58.687091, "o", " "]
|
||||
[58.740349, "o", "g"]
|
||||
[58.832322, "o", "e"]
|
||||
[58.955499, "o", "t"]
|
||||
[59.067944, "o", " "]
|
||||
[59.246223, "o", "n"]
|
||||
[59.344781, "o", "o"]
|
||||
[59.426918, "o", "d"]
|
||||
[59.493282, "o", "e"]
|
||||
[59.672248, "o", "s"]
|
||||
[59.772331, "o", "\r\n\u001b[?2004l\r"]
|
||||
[60.41166, "o", "NAME STATUS ROLES AGE VERSION\r\nk3d-demo-agent-2 Ready <none> 29s v1.21.4+k3s1\r\nk3d-demo-server-0 Ready control-plane,master 41s v1.21.4+k3s1\r\nk3d-demo-agent-0 Ready <none> 31s v1.21.4+k3s1\r\nk3d-demo-agent-1 Ready <none> 31s v1.21.4+k3s1\r\n"]
|
||||
[60.414302, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[61.301105, "o", "k"]
|
||||
[61.534792, "o", "u"]
|
||||
[61.723192, "o", "b"]
|
||||
[61.800647, "o", "e"]
|
||||
[61.912191, "o", "c"]
|
||||
[62.111433, "o", "t"]
|
||||
[62.220654, "o", "l"]
|
||||
[62.400417, "o", " "]
|
||||
[62.434071, "o", "g"]
|
||||
[62.523052, "o", "e"]
|
||||
[62.634216, "o", "t"]
|
||||
[62.700412, "o", " "]
|
||||
[62.923073, "o", "p"]
|
||||
[63.120958, "o", "o"]
|
||||
[63.231192, "o", "d"]
|
||||
[63.287011, "o", "s"]
|
||||
[63.497854, "o", " "]
|
||||
[63.642017, "o", "-"]
|
||||
[63.896056, "o", "A"]
|
||||
[64.129633, "o", "\r\n\u001b[?2004l\r"]
|
||||
[64.180813, "o", "NAMESPACE NAME READY STATUS RESTARTS AGE\r\nkube-system coredns-7448499f4d-rrmh5 1/1 Running 0 34s\r\nkube-system metrics-server-86cbb8457f-6hkns 1/1 Running 0 34s\r\nkube-system local-path-provisioner-5ff76fc89d-ltzd4 1/1 Running 0 34s\r\nkube-system helm-install-traefik-crd-st9fm 0/1 Completed 0 34s\r\nkube-system traefik-97b44b794-lgljm 0/1 ContainerCreating 0 11s\r\nkube-system helm-install-traefik-6t7fr 0/1 Completed 1 "]
|
||||
[64.181, "o", "34s\r\nkube-system svclb-traefik-wztvf 2/2 Running 0 11s\r\nkube-system svclb-traefik-ksk54 2/2 Running 0 11s\r\nkube-system svclb-traefik-s286b 2/2 Running 0 11s\r\nkube-system svclb-traefik-ksbmz 2/2 Running 0 11s\r\n"]
|
||||
[64.182931, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[66.050907, "o", "#"]
|
||||
[66.160953, "o", " "]
|
||||
[66.559434, "o", "P"]
|
||||
[66.768444, "o", "r"]
|
||||
[66.844975, "o", "o"]
|
||||
[67.022583, "o", "f"]
|
||||
[67.098851, "o", "i"]
|
||||
[67.286285, "o", "t"]
|
||||
[67.921864, "o", "."]
|
||||
[69.59588, "o", "\r\n\u001b[?2004l\r"]
|
||||
[69.596126, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[70.123764, "o", "\u001b[?2004l\r\r\nexit\r\n"]
|
21
docs/static/css/extra.css
vendored
21
docs/static/css/extra.css
vendored
@ -1,5 +1,6 @@
|
||||
.md-header__button.md-logo img, .md-header__button.md-logo svg {
|
||||
.md-header-nav__button.md-logo img, .md-header-nav__button.md-logo svg {
|
||||
width: 3rem;
|
||||
height: 3rem;
|
||||
}
|
||||
|
||||
.md-header-nav__button.md-logo {
|
||||
@ -23,28 +24,10 @@
|
||||
position: relative;
|
||||
}
|
||||
|
||||
/* This is equal to light mode */
|
||||
[data-md-color-primary=black] .md-tabs {
|
||||
|
||||
/* Set color of the tab bar */
|
||||
background-color: #0DCEFF;
|
||||
}
|
||||
|
||||
/* Dark Mode */
|
||||
[data-md-color-scheme="slate"] .md-header {
|
||||
/* keep black background of title bar (header) */
|
||||
background-color: black;
|
||||
}
|
||||
|
||||
/* Tab Bar */
|
||||
.md-tabs {
|
||||
color: black;
|
||||
}
|
||||
|
||||
.md-tabs__item {
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
.md-tabs__link--active {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
@ -1,9 +1,6 @@
|
||||
title: Guides
|
||||
nav:
|
||||
- configfile.md
|
||||
title: Usage
|
||||
arrange:
|
||||
- commands.md
|
||||
- kubeconfig.md
|
||||
- multiserver.md
|
||||
- registries.md
|
||||
- exposing_services.md
|
||||
- advanced
|
||||
- commands
|
||||
- guides
|
@ -1,4 +0,0 @@
|
||||
title: Advanced Guides
|
||||
nav:
|
||||
- calico.md
|
||||
- cuda.md
|
@ -1,116 +0,0 @@
|
||||
# Running CUDA workloads
|
||||
|
||||
If you want to run CUDA workloads on the K3s container you need to customize the container.
|
||||
CUDA workloads require the NVIDIA Container Runtime, so containerd needs to be configured to use this runtime.
|
||||
The K3s container itself also needs to run with this runtime.
|
||||
If you are using Docker you can install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html).
|
||||
|
||||
## Building a customized K3s image
|
||||
|
||||
To get the NVIDIA container runtime in the K3s image you need to build your own K3s image.
|
||||
The native K3s image is based on Alpine but the NVIDIA container runtime is not supported on Alpine yet.
|
||||
To get around this we need to build the image with a supported base image.
|
||||
|
||||
### Dockerfile
|
||||
|
||||
[Dockerfile](cuda/Dockerfile):
|
||||
|
||||
```Dockerfile
|
||||
{% include "cuda/Dockerfile" %}
|
||||
```
|
||||
|
||||
This Dockerfile is based on the [K3s Dockerfile](https://github.com/rancher/k3s/blob/master/package/Dockerfile).
|
||||
The following changes are applied:
|
||||
|
||||
1. Change the base images to nvidia/cuda:11.2.0-base-ubuntu18.04 so the NVIDIA Container Runtime can be installed. The version of `cuda:xx.x.x` must match the one you're planning to use.
|
||||
2. Add a custom containerd `config.toml` template to add the NVIDIA Container Runtime. This replaces the default `runc` runtime.
|
||||
3. Add a manifest for the NVIDIA driver plugin for Kubernetes
|
||||
|
||||
### Configure containerd
|
||||
|
||||
We need to configure containerd to use the NVIDIA Container Runtime by customizing the `config.toml` that it uses at startup. K3s provides a way to do this using a [config.toml.tmpl](cuda/config.toml.tmpl) file. More information can be found on the [K3s site](https://rancher.com/docs/k3s/latest/en/advanced/#configuring-containerd).
|
||||
|
||||
```go
|
||||
{% include "cuda/config.toml.tmpl" %}
|
||||
```
|
||||
|
||||
### The NVIDIA device plugin
|
||||
|
||||
To enable NVIDIA GPU support on Kubernetes you also need to install the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin). The device plugin is a daemonset that allows you to automatically:
|
||||
|
||||
* Expose the number of GPUs on each node of your cluster
|
||||
* Keep track of the health of your GPUs
|
||||
* Run GPU-enabled containers in your Kubernetes cluster.
|
||||
|
||||
```yaml
|
||||
{% include "cuda/device-plugin-daemonset.yaml" %}
|
||||
```
|
||||
|
||||
### Build the K3s image
|
||||
|
||||
To build the custom image we need to build K3s because we need the generated output.
|
||||
|
||||
Put the following files in a directory:
|
||||
|
||||
* [Dockerfile](cuda/Dockerfile)
|
||||
* [config.toml.tmpl](cuda/config.toml.tmpl)
|
||||
* [device-plugin-daemonset.yaml](cuda/device-plugin-daemonset.yaml)
|
||||
* [build.sh](cuda/build.sh)
|
||||
* [cuda-vector-add.yaml](cuda/cuda-vector-add.yaml)
|
||||
|
||||
The `build.sh` script is configured via environment variables and defaults to `v1.21.2+k3s1`. Please set at least the `IMAGE_REGISTRY` variable! The script builds and pushes the custom K3s image including the NVIDIA container runtime.
|
||||
|
||||
[build.sh](cuda/build.sh):
|
||||
|
||||
```bash
|
||||
{% include "cuda/build.sh" %}
|
||||
```
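For example, assuming your registry is reachable as `registry.example.com` (a placeholder you must replace), an invocation could look like this:

```bash
# all three variables are read by build.sh; unset ones fall back to its defaults
IMAGE_REGISTRY=registry.example.com \
K3S_TAG=v1.21.2-k3s1 \
NVIDIA_CONTAINER_RUNTIME_VERSION=3.5.0-1 \
  bash ./build.sh
```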
|
||||
|
||||
## Run and test the custom image with k3d
|
||||
|
||||
You can use the image with k3d:
|
||||
|
||||
```bash
|
||||
k3d cluster create gputest --image=$IMAGE --gpus=1
|
||||
```
|
||||
|
||||
Deploy a [test pod](cuda/cuda-vector-add.yaml):
|
||||
|
||||
```bash
|
||||
kubectl apply -f cuda-vector-add.yaml
|
||||
kubectl logs cuda-vector-add
|
||||
```
|
||||
|
||||
This should output something like the following:
|
||||
|
||||
```bash
|
||||
$ kubectl logs cuda-vector-add
|
||||
|
||||
[Vector addition of 50000 elements]
|
||||
Copy input data from the host memory to the CUDA device
|
||||
CUDA kernel launch with 196 blocks of 256 threads
|
||||
Copy output data from the CUDA device to the host memory
|
||||
Test PASSED
|
||||
Done
|
||||
```
|
||||
|
||||
If the `cuda-vector-add` pod is stuck in the `Pending` state, the device plugin daemonset probably didn't get deployed correctly from the auto-deploy manifests. In that case, you can apply it manually via `#!bash kubectl apply -f device-plugin-daemonset.yaml`.
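To check whether the device plugin was picked up from the auto-deploy manifests at all, something like the following can help (the daemonset name below is an assumption derived from the manifest file above; use the `get daemonsets` output to find the actual name):

```bash
kubectl -n kube-system get daemonsets
kubectl -n kube-system describe daemonset nvidia-device-plugin-daemonset  # name is an assumption
```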
|
||||
|
||||
## Known issues
|
||||
|
||||
* This approach does not work on WSL2 yet. The NVIDIA driver plugin and container runtime rely on the NVIDIA Management Library (NVML) which is not yet supported. See the [CUDA on WSL User Guide](https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations).
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
Most of the information in this article was obtained from various sources:
|
||||
|
||||
* [Add NVIDIA GPU support to k3s with containerd](https://dev.to/mweibel/add-nvidia-gpu-support-to-k3s-with-containerd-4j17)
|
||||
* [microk8s](https://github.com/ubuntu/microk8s)
|
||||
* [K3s](https://github.com/rancher/k3s)
|
||||
* [k3s-gpu](https://gitlab.com/vainkop1/k3s-gpu)
|
||||
|
||||
## Authors
|
||||
|
||||
* [@markrexwinkel](https://github.com/markrexwinkel)
|
||||
* [@vainkop](https://github.com/vainkop)
|
||||
* [@iwilltry42](https://github.com/iwilltry42)
|
@ -1,47 +0,0 @@
|
||||
ARG K3S_TAG="v1.21.2-k3s1"
|
||||
FROM rancher/k3s:$K3S_TAG as k3s
|
||||
|
||||
FROM nvidia/cuda:11.2.0-base-ubuntu18.04
|
||||
|
||||
ARG NVIDIA_CONTAINER_RUNTIME_VERSION
|
||||
ENV NVIDIA_CONTAINER_RUNTIME_VERSION=$NVIDIA_CONTAINER_RUNTIME_VERSION
|
||||
|
||||
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y install gnupg2 curl
|
||||
|
||||
# Install NVIDIA Container Runtime
|
||||
RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | apt-key add -
|
||||
|
||||
RUN curl -s -L https://nvidia.github.io/nvidia-container-runtime/ubuntu18.04/nvidia-container-runtime.list | tee /etc/apt/sources.list.d/nvidia-container-runtime.list
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y install nvidia-container-runtime=${NVIDIA_CONTAINER_RUNTIME_VERSION}
|
||||
|
||||
COPY --from=k3s / /
|
||||
|
||||
RUN mkdir -p /etc && \
|
||||
echo 'hosts: files dns' > /etc/nsswitch.conf
|
||||
|
||||
RUN chmod 1777 /tmp
|
||||
|
||||
# Provide custom containerd configuration to configure the nvidia-container-runtime
|
||||
RUN mkdir -p /var/lib/rancher/k3s/agent/etc/containerd/
|
||||
|
||||
COPY config.toml.tmpl /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
|
||||
|
||||
# Deploy the nvidia driver plugin on startup
|
||||
RUN mkdir -p /var/lib/rancher/k3s/server/manifests
|
||||
|
||||
COPY device-plugin-daemonset.yaml /var/lib/rancher/k3s/server/manifests/nvidia-device-plugin-daemonset.yaml
|
||||
|
||||
VOLUME /var/lib/kubelet
|
||||
VOLUME /var/lib/rancher/k3s
|
||||
VOLUME /var/lib/cni
|
||||
VOLUME /var/log
|
||||
|
||||
ENV PATH="$PATH:/bin/aux"
|
||||
|
||||
ENTRYPOINT ["/bin/k3s"]
|
||||
CMD ["agent"]
|
@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
K3S_TAG=${K3S_TAG:="v1.21.2-k3s1"} # replace + with -, if needed
|
||||
IMAGE_REGISTRY=${IMAGE_REGISTRY:="MY_REGISTRY"}
|
||||
IMAGE_REPOSITORY=${IMAGE_REPOSITORY:="rancher/k3s"}
|
||||
IMAGE_TAG="$K3S_TAG-cuda"
|
||||
IMAGE=${IMAGE:="$IMAGE_REGISTRY/$IMAGE_REPOSITORY:$IMAGE_TAG"}
|
||||
|
||||
NVIDIA_CONTAINER_RUNTIME_VERSION=${NVIDIA_CONTAINER_RUNTIME_VERSION:="3.5.0-1"}
|
||||
|
||||
echo "IMAGE=$IMAGE"
|
||||
|
||||
# due to some unknown reason, copying symlinks fails with buildkit enabled
|
||||
DOCKER_BUILDKIT=0 docker build \
|
||||
--build-arg K3S_TAG=$K3S_TAG \
|
||||
--build-arg NVIDIA_CONTAINER_RUNTIME_VERSION=$NVIDIA_CONTAINER_RUNTIME_VERSION \
|
||||
-t $IMAGE .
|
||||
docker push $IMAGE
|
||||
echo "Done!"
|
@ -2,92 +2,70 @@
|
||||
|
||||
```bash
|
||||
k3d
|
||||
--verbose # GLOBAL: enable verbose (debug) logging (default: false)
|
||||
--trace # GLOBAL: enable super verbose logging (trace logging) (default: false)
|
||||
--verbose # enable verbose (debug) logging (default: false)
|
||||
--version # show k3d and k3s version
|
||||
-h, --help # GLOBAL: show help text
|
||||
|
||||
-h, --help # show help text
|
||||
version # show k3d and k3s version
|
||||
help [COMMAND] # show help text for any command
|
||||
completion [bash | zsh | (psh | powershell)] # generate completion scripts for common shells
|
||||
cluster [CLUSTERNAME] # default cluster name is 'k3s-default'
|
||||
create
|
||||
-a, --agents # specify how many agent nodes you want to create (integer, default: 0)
|
||||
--agents-memory # specify memory limit for agent containers/nodes (unit, e.g. 1g)
|
||||
--api-port # specify the port on which the cluster will be accessible (format '[HOST:]HOSTPORT', default: random)
|
||||
-c, --config # use a config file (format 'PATH')
|
||||
-e, --env # add environment variables to the nodes (quoted string, format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
|
||||
--gpus # [from docker CLI] add GPU devices to the node containers (string, e.g. 'all')
|
||||
-i, --image # specify which k3s image should be used for the nodes (string, default: 'docker.io/rancher/k3s:v1.20.0-k3s2', tag changes per build)
|
||||
--k3s-arg # add additional arguments to the k3s server/agent (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help & https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
|
||||
--kubeconfig-switch-context # (implies --kubeconfig-update-default) automatically sets the current-context of your default kubeconfig to the new cluster's context (default: true)
|
||||
--kubeconfig-update-default # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true') (default: true)
|
||||
-l, --label # add (docker) labels to the node containers (format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
|
||||
--network # specify an existing (docker) network you want to connect to (string)
|
||||
--no-hostip # disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS (default: false)
|
||||
--no-image-volume # disable the creation of a volume for storing images (used for the 'k3d image import' command) (default: false)
|
||||
--no-lb # disable the creation of a load balancer in front of the server nodes (default: false)
|
||||
--no-rollback # disable the automatic rollback actions, if anything goes wrong (default: false)
|
||||
-p, --port # add some more port mappings (format: '[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]', use flag multiple times)
|
||||
--registry-create # create a new (docker) registry dedicated for this cluster (default: false)
|
||||
--registry-use # use an existing local (docker) registry with this cluster (string, use multiple times)
|
||||
-s, --servers # specify how many server nodes you want to create (integer, default: 1)
|
||||
--servers-memory # specify memory limit for server containers/nodes (unit, e.g. 1g)
|
||||
--token # specify a cluster token (string, default: auto-generated)
|
||||
--timeout # specify a timeout, after which the cluster creation will be interrupted and changes rolled back (duration, e.g. '10s')
|
||||
-v, --volume # specify additional bind-mounts (format: '[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
|
||||
--wait # enable waiting for all server nodes to be ready before returning (default: true)
|
||||
--api-port # specify the port on which the cluster will be accessible (e.g. via kubectl)
|
||||
-i, --image # specify which k3s image should be used for the nodes
|
||||
--k3s-agent-arg # add additional arguments to the k3s agent (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
|
||||
--k3s-server-arg # add additional arguments to the k3s server (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
|
||||
-s, --servers # specify how many server nodes you want to create
|
||||
--network # specify a network you want to connect to
|
||||
--no-hostip # disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS
|
||||
--no-image-volume # disable the creation of a volume for storing images (used for the 'k3d load image' command)
|
||||
--no-lb # disable the creation of a LoadBalancer in front of the server nodes
|
||||
--no-rollback # disable the automatic rollback actions, if anything goes wrong
|
||||
-p, --port # add some more port mappings
|
||||
--token # specify a cluster token (default: auto-generated)
|
||||
--timeout # specify a timeout, after which the cluster creation will be interrupted and changes rolled back
|
||||
--update-default-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
|
||||
--switch-context # (implies --update-default-kubeconfig) automatically sets the current-context of your default kubeconfig to the new cluster's context
|
||||
-v, --volume # specify additional bind-mounts
|
||||
--wait # enable waiting for all server nodes to be ready before returning
|
||||
-a, --agents # specify how many agent nodes you want to create
|
||||
-e, --env # add environment variables to the node containers
|
||||
start CLUSTERNAME # start a (stopped) cluster
|
||||
-a, --all # start all clusters (default: false)
|
||||
--wait # wait for all servers and server-loadbalancer to be up before returning (default: true)
|
||||
--timeout # maximum waiting time for '--wait' before canceling/returning (duration, e.g. '10s')
|
||||
-a, --all # start all clusters
|
||||
--wait # wait for all servers and server-loadbalancer to be up before returning
|
||||
--timeout # maximum waiting time for '--wait' before canceling/returning
|
||||
stop CLUSTERNAME # stop a cluster
|
||||
-a, --all # stop all clusters (default: false)
|
||||
-a, --all # stop all clusters
|
||||
delete CLUSTERNAME # delete an existing cluster
|
||||
-a, --all # delete all existing clusters (default: false)
|
||||
-a, --all # delete all existing clusters
|
||||
list [CLUSTERNAME [CLUSTERNAME ...]]
|
||||
--no-headers # do not print headers (default: false)
|
||||
--token # show column with cluster tokens (default: false)
|
||||
-o, --output # format the output (format: 'json|yaml')
|
||||
completion [bash | zsh | fish | (psh | powershell)] # generate completion scripts for common shells
|
||||
config
|
||||
init # write a default k3d config (as a starting point)
|
||||
-f, --force # force overwrite target file (default: false)
|
||||
-o, --output # file to write to (string, default "k3d-default.yaml")
|
||||
help [COMMAND] # show help text for any command
|
||||
image
|
||||
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
|
||||
-c, --cluster # clusters to load the image into (string, use flag multiple times, default: k3s-default)
|
||||
-k, --keep-tarball # do not delete the image tarball from the shared volume after completion (default: false)
|
||||
kubeconfig
|
||||
get (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and write it to stdout
|
||||
-a, --all # get kubeconfigs from all clusters (default: false)
|
||||
merge | write (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and merge it/them into a (kubeconfig-)file
|
||||
-a, --all # get kubeconfigs from all clusters (default: false)
|
||||
-s, --kubeconfig-switch-context # switch current-context in kubeconfig to the new context (default: true)
|
||||
-d, --kubeconfig-merge-default # update the default kubeconfig (usually $KUBECONFIG or $HOME/.kube/config)
|
||||
-o, --output # specify the output file where the kubeconfig should be written to (string)
|
||||
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents (default: false)
|
||||
-u, --update # update conflicting fields in existing kubeconfig (default: true)
|
||||
--no-headers # do not print headers
|
||||
--token # show column with cluster tokens
|
||||
node
|
||||
create NODENAME # Create new nodes (and add them to existing clusters)
|
||||
-c, --cluster # specify the cluster that the node shall connect to (string, default: k3s-default)
|
||||
-i, --image # specify which k3s image should be used for the node(s) (string, default: 'docker.io/rancher/k3s:v1.20.0-k3s2', tag changes per build)
|
||||
--replicas # specify how many replicas you want to create with this spec (integer, default: 1)
|
||||
--role # specify the node role (string, format: 'agent|server', default: agent)
|
||||
--timeout # specify a timeout duration, after which the node creation will be interrupted, if not done yet (duration, e.g. '10s')
|
||||
--wait # wait for the node to be up and running before returning (default: true)
|
||||
-c, --cluster # specify the cluster that the node shall connect to
|
||||
-i, --image # specify which k3s image should be used for the node(s)
|
||||
--replicas # specify how many replicas you want to create with this spec
|
||||
--role # specify the node role
|
||||
--wait # wait for the node to be up and running before returning
|
||||
--timeout # specify a timeout duration, after which the node creation will be interrupted, if not done yet
|
||||
start NODENAME # start a (stopped) node
|
||||
stop NODENAME # stop a node
|
||||
delete NODENAME # delete an existing node
|
||||
-a, --all # delete all existing nodes (default: false)
|
||||
-r, --registries # also delete registries, as a special type of node (default: false)
|
||||
-a, --all # delete all existing nodes
|
||||
list NODENAME
|
||||
--no-headers # do not print headers (default: false)
|
||||
registry
|
||||
create REGISTRYNAME
|
||||
-i, --image # specify image used for the registry (string, default: "docker.io/library/registry:2")
|
||||
-p, --port # select host port to map to (format: '[HOST:]HOSTPORT', default: 'random')
|
||||
delete REGISTRYNAME
|
||||
-a, --all # delete all existing registries (default: false)
|
||||
list [NAME [NAME...]]
|
||||
--no-headers # disable table headers (default: false)
|
||||
version # show k3d and k3s version
|
||||
--no-headers # do not print headers
|
||||
kubeconfig
|
||||
get (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and write it to stdout
|
||||
-a, --all # get kubeconfigs from all clusters
|
||||
merge | write (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and merge it/them into a file in $HOME/.k3d (or whatever you specify via the flags)
|
||||
-a, --all # get kubeconfigs from all clusters
|
||||
--output # specify the output file where the kubeconfig should be written to
|
||||
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents
|
||||
-s, --switch-context # switch current-context in kubeconfig to the new context
|
||||
-u, --update # update conflicting fields in existing kubeconfig (default: true)
|
||||
-d, --merge-default-kubeconfig # update the default kubeconfig (usually $KUBECONFIG or $HOME/.kube/config)
|
||||
image
|
||||
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
|
||||
-c, --cluster # clusters to load the image into
|
||||
-k, --keep-tarball # do not delete the image tarball from the shared volume after completion
|
||||
```
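For instance, a typical invocation combining several of the flags listed above (the cluster name `dev` and the ports are illustrative):

```bash
k3d cluster create dev \
  --agents 2 \
  --api-port 127.0.0.1:6445 \
  -p "8080:80@agent:0"
```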
|
||||
|
@ -1 +0,0 @@
|
||||
title: Commands
|
@ -1,36 +0,0 @@
|
||||
## k3d
|
||||
|
||||
https://k3d.io/ -> Run k3s in Docker!
|
||||
|
||||
### Synopsis
|
||||
|
||||
https://k3d.io/
|
||||
k3d is a wrapper CLI that helps you to easily create k3s clusters inside docker.
|
||||
Nodes of a k3d cluster are docker containers running a k3s image.
|
||||
All Nodes of a k3d cluster are part of the same docker network.
|
||||
|
||||
```
|
||||
k3d [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for k3d
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
--version Show k3d and default k3s version
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d cluster](k3d_cluster.md) - Manage cluster(s)
|
||||
* [k3d completion](k3d_completion.md) - Generate completion scripts for [bash, zsh, fish, powershell | psh]
|
||||
* [k3d config](k3d_config.md) - Work with config file(s)
|
||||
* [k3d image](k3d_image.md) - Handle container images.
|
||||
* [k3d kubeconfig](k3d_kubeconfig.md) - Manage kubeconfig(s)
|
||||
* [k3d node](k3d_node.md) - Manage node(s)
|
||||
* [k3d registry](k3d_registry.md) - Manage registry/registries
|
||||
* [k3d version](k3d_version.md) - Show k3d and default k3s version
|
||||
|
@ -1,36 +0,0 @@
|
||||
## k3d cluster
|
||||
|
||||
Manage cluster(s)
|
||||
|
||||
### Synopsis
|
||||
|
||||
Manage cluster(s)
|
||||
|
||||
```
|
||||
k3d cluster [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for cluster
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker!
|
||||
* [k3d cluster create](k3d_cluster_create.md) - Create a new cluster
|
||||
* [k3d cluster delete](k3d_cluster_delete.md) - Delete cluster(s).
|
||||
* [k3d cluster edit](k3d_cluster_edit.md) - [EXPERIMENTAL] Edit cluster(s).
|
||||
* [k3d cluster list](k3d_cluster_list.md) - List cluster(s)
|
||||
* [k3d cluster start](k3d_cluster_start.md) - Start existing k3d cluster(s)
|
||||
* [k3d cluster stop](k3d_cluster_stop.md) - Stop existing k3d cluster(s)
|
||||
|
@ -1,72 +0,0 @@
|
||||
## k3d cluster create
|
||||
|
||||
Create a new cluster
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a new k3s cluster with containerized nodes (k3s in docker).
|
||||
Every cluster will consist of one or more containers:
|
||||
- 1 (or more) server node container (k3s)
|
||||
- (optionally) 1 loadbalancer container as the entrypoint to the cluster (nginx)
|
||||
- (optionally) 1 (or more) agent node containers (k3s)
|
||||
|
||||
|
||||
```
|
||||
k3d cluster create NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-a, --agents int Specify how many agents you want to create
|
||||
--agents-memory string Memory limit imposed on the agents nodes [From docker]
|
||||
--api-port [HOST:]HOSTPORT Specify the Kubernetes API server port exposed on the LoadBalancer (Format: [HOST:]HOSTPORT)
|
||||
- Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`
|
||||
-c, --config string Path of a config file to use
|
||||
-e, --env KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add environment variables to nodes (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]
|
||||
- Example: `k3d cluster create --agents 2 -e "HTTP_PROXY=my.proxy.com@server:0" -e "SOME_KEY=SOME_VAL@server:0"`
|
||||
--gpus string GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]
|
||||
-h, --help help for create
|
||||
-i, --image string Specify k3s image that you want to use for the nodes
|
||||
--k3s-arg ARG@NODEFILTER[;@NODEFILTER] Additional args passed to k3s command (Format: ARG@NODEFILTER[;@NODEFILTER])
|
||||
- Example: `k3d cluster create --k3s-arg "--disable=traefik@server:0"
|
||||
--k3s-node-label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to k3s node (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]
|
||||
- Example: `k3d cluster create --agents 2 --k3s-node-label "my.label@agent:0,1" --k3s-node-label "other.label=somevalue@server:0"`
|
||||
--kubeconfig-switch-context Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default) (default true)
|
||||
--kubeconfig-update-default Directly update the default kubeconfig with the new cluster's context (default true)
|
||||
--lb-config-override strings Use dotted YAML path syntax to override nginx loadbalancer settings
|
||||
--network string Join an existing network
|
||||
--no-image-volume Disable the creation of a volume for importing images
|
||||
--no-lb Disable the creation of a LoadBalancer in front of the server nodes
|
||||
--no-rollback Disable the automatic rollback actions, if anything goes wrong
|
||||
-p, --port [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER] Map ports from the node containers (via the serverlb) to the host (Format: [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER])
|
||||
- Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent:1`
|
||||
--registry-config string Specify path to an extra registries.yaml file
|
||||
--registry-create NAME[:HOST][:HOSTPORT] Create a k3d-managed registry and connect it to the cluster (Format: NAME[:HOST][:HOSTPORT]
|
||||
- Example: `k3d cluster create --registry-create mycluster-registry:0.0.0.0:5432`
|
||||
--registry-use stringArray Connect to one or more k3d-managed registries running locally
|
||||
--runtime-label KEY[=VALUE][@NODEFILTER[;NODEFILTER...]] Add label to container runtime (Format: KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]
|
||||
- Example: `k3d cluster create --agents 2 --runtime-label "my.label@agent:0,1" --runtime-label "other.label=somevalue@server:0"`
|
||||
-s, --servers int Specify how many servers you want to create
|
||||
--servers-memory string Memory limit imposed on the server nodes [From docker]
|
||||
--subnet 172.28.0.0/16 [Experimental: IPAM] Define a subnet for the newly created container network (Example: 172.28.0.0/16)
|
||||
--timeout duration Rollback changes if cluster couldn't be created in specified duration.
|
||||
--token string Specify a cluster token. By default, we generate one.
|
||||
-v, --volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]] Mount volumes into the nodes (Format: [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]
|
||||
- Example: `k3d cluster create --agents 2 -v /my/path@agent:0,1 -v /tmp/test:/tmp/other@server:0`
|
||||
--wait Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever. (default true)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d cluster](k3d_cluster.md) - Manage cluster(s)
|
||||
|
@ -1,32 +0,0 @@
|
||||
## k3d cluster delete
|
||||
|
||||
Delete cluster(s).
|
||||
|
||||
### Synopsis
|
||||
|
||||
Delete cluster(s).
|
||||
|
||||
```
|
||||
k3d cluster delete [NAME [NAME ...] | --all] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-a, --all Delete all existing clusters
|
||||
-c, --config string Path of a config file to use
|
||||
-h, --help help for delete
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d cluster](k3d_cluster.md) - Manage cluster(s)
|
||||
|
@ -1,32 +0,0 @@
|
||||
## k3d cluster edit
|
||||
|
||||
[EXPERIMENTAL] Edit cluster(s).
|
||||
|
||||
### Synopsis
|
||||
|
||||
[EXPERIMENTAL] Edit cluster(s).
|
||||
|
||||
```
|
||||
k3d cluster edit CLUSTER [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for edit
|
||||
--port-add [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER] [EXPERIMENTAL] Map ports from the node containers (via the serverlb) to the host (Format: [HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER])
|
||||
- Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d cluster](k3d_cluster.md) - Manage cluster(s)
|
||||
|
@ -1,33 +0,0 @@
|
||||
## k3d cluster list
|
||||
|
||||
List cluster(s)
|
||||
|
||||
### Synopsis
|
||||
|
||||
List cluster(s).
|
||||
|
||||
```
|
||||
k3d cluster list [NAME [NAME...]] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for list
|
||||
--no-headers Disable headers
|
||||
-o, --output string Output format. One of: json|yaml
|
||||
--token Print k3s cluster token
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d cluster](k3d_cluster.md) - Manage cluster(s)
|
||||
|
@ -1,33 +0,0 @@
|
||||
## k3d cluster start
|
||||
|
||||
Start existing k3d cluster(s)
|
||||
|
||||
### Synopsis
|
||||
|
||||
Start existing k3d cluster(s)
|
||||
|
||||
```
|
||||
k3d cluster start [NAME [NAME...] | --all] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-a, --all Start all existing clusters
|
||||
-h, --help help for start
|
||||
--timeout duration Maximum waiting time for '--wait' before canceling/returning.
|
||||
--wait Wait for the server(s) (and loadbalancer) to be ready before returning. (default true)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d cluster](k3d_cluster.md) - Manage cluster(s)
|
||||
|
@ -1,31 +0,0 @@
|
||||
## k3d cluster stop
|
||||
|
||||
Stop existing k3d cluster(s)
|
||||
|
||||
### Synopsis
|
||||
|
||||
Stop existing k3d cluster(s).
|
||||
|
||||
```
|
||||
k3d cluster stop [NAME [NAME...] | --all] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-a, --all Stop all existing clusters
|
||||
-h, --help help for stop
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d cluster](k3d_cluster.md) - Manage cluster(s)
|
||||
|
@ -1,68 +0,0 @@
|
||||
## k3d completion
|
||||
|
||||
Generate completion scripts for [bash, zsh, fish, powershell | psh]
|
||||
|
||||
### Synopsis
|
||||
|
||||
To load completions:
|
||||
|
||||
Bash:
|
||||
|
||||
$ source <(k3d completion bash)
|
||||
|
||||
# To load completions for each session, execute once:
|
||||
# Linux:
|
||||
$ k3d completion bash > /etc/bash_completion.d/k3d
|
||||
# macOS:
|
||||
$ k3d completion bash > /usr/local/etc/bash_completion.d/k3d
|
||||
|
||||
Zsh:
|
||||
|
||||
# If shell completion is not already enabled in your environment,
|
||||
# you will need to enable it. You can execute the following once:
|
||||
|
||||
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
|
||||
|
||||
# To load completions for each session, execute once:
|
||||
$ k3d completion zsh > "${fpath[1]}/k3d"
|
||||
|
||||
# You will need to start a new shell for this setup to take effect.
|
||||
|
||||
fish:
|
||||
|
||||
$ k3d completion fish | source
|
||||
|
||||
# To load completions for each session, execute once:
|
||||
$ k3d completion fish > ~/.config/fish/completions/k3d.fish
|
||||
|
||||
PowerShell:
|
||||
|
||||
PS> k3d completion powershell | Out-String | Invoke-Expression
|
||||
|
||||
# To load completions for every new session, run:
|
||||
PS> k3d completion powershell > k3d.ps1
|
||||
# and source this file from your PowerShell profile.
|
||||
|
||||
|
||||
```
|
||||
k3d completion SHELL
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for completion
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker!
|
||||
|
@ -1,32 +0,0 @@
|
||||
## k3d config
|
||||
|
||||
Work with config file(s)
|
||||
|
||||
### Synopsis
|
||||
|
||||
Work with config file(s)
|
||||
|
||||
```
|
||||
k3d config [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for config
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker!
|
||||
* [k3d config init](k3d_config_init.md) -
|
||||
* [k3d config migrate](k3d_config_migrate.md) -
|
||||
|
@ -1,28 +0,0 @@
|
||||
## k3d config init
|
||||
|
||||
|
||||
|
||||
```
|
||||
k3d config init [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-f, --force Force overwrite of target file
|
||||
-h, --help help for init
|
||||
-o, --output string Write a default k3d config (default "k3d-default.yaml")
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d config](k3d_config.md) - Work with config file(s)
|
||||
|
@ -1,26 +0,0 @@
|
||||
## k3d config migrate
|
||||
|
||||
|
||||
|
||||
```
|
||||
k3d config migrate INPUT [OUTPUT] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for migrate
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d config](k3d_config.md) - Work with config file(s)
|
||||
|
@ -1,31 +0,0 @@
|
||||
## k3d image
|
||||
|
||||
Handle container images.
|
||||
|
||||
### Synopsis
|
||||
|
||||
Handle container images.
|
||||
|
||||
```
|
||||
k3d image [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for image
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker!
|
||||
* [k3d image import](k3d_image_import.md) - Import image(s) from docker into k3d cluster(s).
|
||||
|
@ -1,45 +0,0 @@
|
||||
## k3d image import
|
||||
|
||||
Import image(s) from docker into k3d cluster(s).
|
||||
|
||||
### Synopsis
|
||||
|
||||
Import image(s) from docker into k3d cluster(s).
|
||||
|
||||
If an IMAGE starts with the prefix 'docker.io/', then this prefix is stripped internally.
|
||||
That is, 'docker.io/rancher/k3d-tools:latest' is treated as 'rancher/k3d-tools:latest'.
|
||||
|
||||
If an IMAGE starts with the prefix 'library/' (or 'docker.io/library/'), then this prefix is stripped internally.
|
||||
That is, 'library/busybox:latest' (or 'docker.io/library/busybox:latest') are treated as 'busybox:latest'.
|
||||
|
||||
If an IMAGE does not have a version tag, then ':latest' is assumed.
|
||||
That is, 'rancher/k3d-tools' is treated as 'rancher/k3d-tools:latest'.
|
||||
|
||||
A file ARCHIVE always takes precedence.
|
||||
So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of the IMAGE of the same name.
|
||||
|
||||
```
|
||||
k3d image import [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-c, --cluster stringArray Select clusters to load the image to. (default [k3s-default])
|
||||
-h, --help help for import
|
||||
-k, --keep-tarball Do not delete the tarball containing the saved images from the shared volume
|
||||
-t, --keep-tools Do not delete the tools node after import
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d image](k3d_image.md) - Handle container images.
|
||||
|
@ -1,32 +0,0 @@
|
||||
## k3d kubeconfig
|
||||
|
||||
Manage kubeconfig(s)
|
||||
|
||||
### Synopsis
|
||||
|
||||
Manage kubeconfig(s)
|
||||
|
||||
```
|
||||
k3d kubeconfig [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for kubeconfig
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d](k3d.md) - https://k3d.io/ -> Run k3s in Docker!
|
||||
* [k3d kubeconfig get](k3d_kubeconfig_get.md) - Print kubeconfig(s) from cluster(s).
|
||||
* [k3d kubeconfig merge](k3d_kubeconfig_merge.md) - Write/Merge kubeconfig(s) from cluster(s) into new or existing kubeconfig/file.
|
||||
|
@ -1,31 +0,0 @@
|
||||
## k3d kubeconfig get
|
||||
|
||||
Print kubeconfig(s) from cluster(s).
|
||||
|
||||
### Synopsis
|
||||
|
||||
Print kubeconfig(s) from cluster(s).
|
||||
|
||||
```
|
||||
k3d kubeconfig get [CLUSTER [CLUSTER [...]] | --all] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-a, --all Output kubeconfigs from all existing clusters
|
||||
-h, --help help for get
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d kubeconfig](k3d_kubeconfig.md) - Manage kubeconfig(s)
|
||||
|
@ -1,36 +0,0 @@
|
||||
## k3d kubeconfig merge
|
||||
|
||||
Write/Merge kubeconfig(s) from cluster(s) into new or existing kubeconfig/file.
|
||||
|
||||
### Synopsis
|
||||
|
||||
Write/Merge kubeconfig(s) from cluster(s) into new or existing kubeconfig/file.
|
||||
|
||||
```
|
||||
k3d kubeconfig merge [CLUSTER [CLUSTER [...]] | --all] [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-a, --all Get kubeconfigs from all existing clusters
|
||||
-h, --help help for merge
|
||||
-d, --kubeconfig-merge-default Merge into the default kubeconfig ($KUBECONFIG or /home/thklein/.kube/config)
|
||||
-s, --kubeconfig-switch-context Switch to new context (default true)
|
||||
-o, --output string Define output [ - | FILE ] (default from $KUBECONFIG or /home/thklein/.kube/config
|
||||
--overwrite [Careful!] Overwrite existing file, ignoring its contents
|
||||
-u, --update Update conflicting fields in existing kubeconfig (default true)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--timestamps Enable Log timestamps
|
||||
--trace Enable super verbose output (trace logging)
|
||||
--verbose Enable verbose output (debug logging)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3d kubeconfig](k3d_kubeconfig.md) - Manage kubeconfig(s)
|
||||
|