Compare commits
143 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
a969f2fa50 | ||
|
def200ac0e | ||
|
e00db082a4 | ||
|
28c13446f2 | ||
|
7340380ec7 | ||
|
c9c002207a | ||
|
c7c4aea4cf | ||
|
12b821e31c | ||
|
b7416d77aa | ||
|
5d1c83d47a | ||
|
8c776dc471 | ||
|
436fd2df8b | ||
|
3e12d67da3 | ||
|
a5489e08f7 | ||
|
8f35992cff | ||
|
8582cd387d | ||
|
676efcbf4a | ||
|
6337c298e6 | ||
|
bb7b6e3605 | ||
|
f08bb6dad8 | ||
|
b595638644 | ||
|
9048c7526b | ||
|
fb47728ddc | ||
|
92aba64161 | ||
|
e184a90f2c | ||
|
ac1dbb9f0a | ||
|
5b7ec7e29d | ||
|
da2d541a5c | ||
|
34bf23ef85 | ||
|
0ec51e1318 | ||
|
fdefd62687 | ||
|
0b7e118046 | ||
|
ba8f931e65 | ||
|
2ec3efd877 | ||
|
c2aeeca226 | ||
|
80853aa848 | ||
|
840a61a47d | ||
|
3c1422b9d3 | ||
|
bbdc073467 | ||
|
f865919c6b | ||
|
2db715c064 | ||
|
5111fab6f9 | ||
|
ecd86ed993 | ||
|
49ff9cb0fd | ||
|
4d8876f649 | ||
|
c2289c71b6 | ||
|
f80d7e8ba0 | ||
|
56f80d766f | ||
|
c1190c669a | ||
|
c972374434 | ||
|
fcaf3d79f4 | ||
|
a3696aa3d4 | ||
|
d8951c0eb3 | ||
|
4aff326f89 | ||
|
3896f5a019 | ||
|
e54f762832 | ||
|
284db4d74b | ||
|
7ed52c687d | ||
|
7275929907 | ||
|
e9007ba2ef | ||
|
92e19d7e17 | ||
|
739486e382 | ||
|
5c5c4c54c6 | ||
|
59224236b9 | ||
|
01a0526301 | ||
|
2c747cf6d7 | ||
|
dc4c29361f | ||
|
8a65268312 | ||
|
97bebbad66 | ||
|
6e05342812 | ||
|
122ea4637a | ||
|
0eaaef4c45 | ||
|
f9fe8ef0c1 | ||
|
99d6a18be7 | ||
|
b1b7e957bc | ||
|
c3b799c6e2 | ||
|
469b56c253 | ||
|
576ac040cc | ||
|
fbe93eb039 | ||
|
10179f8555 | ||
|
8079d7d2be | ||
|
3fd1061e8c | ||
|
2205264a11 | ||
|
3b0c095765 | ||
|
85b19f7ef7 | ||
|
dc19eb7139 | ||
|
e79c3a98d8 | ||
|
9b8fcd17be | ||
|
18a3db3d9b | ||
|
02b80fecfc | ||
|
e22299f485 | ||
|
6ad5a7b9dd | ||
|
b9ea408d49 | ||
|
6d771a20fa | ||
|
e7b372f0fd | ||
|
c25618cbdc | ||
|
1631a7b91a | ||
|
fb74afceff | ||
|
b485682448 | ||
|
645a5163d4 | ||
|
36bda94dbd | ||
|
f3e2ef5508 | ||
|
9f56c006ee | ||
|
6bdcb3f7f9 | ||
|
89cb12181a | ||
|
d11b96ac7e | ||
|
af0f30c8d4 | ||
|
5c00056118 | ||
|
b09a9680e6 | ||
|
836201611b | ||
|
33c3f7cbda | ||
|
9b888c8216 | ||
|
0d01441553 | ||
|
81748f9b3a | ||
|
1e0aac19f6 | ||
|
418c1887fe | ||
|
9aa5af8d5d | ||
|
802038abb1 | ||
|
b5c4204303 | ||
|
44d6fef28c | ||
|
61107ca5bf | ||
|
95c62650fa | ||
|
80f5aa7d58 | ||
|
1fabe118fa | ||
|
3631e65570 | ||
|
164758d29a | ||
|
eff97313da | ||
|
552075709f | ||
|
66e86e20ab | ||
|
6581844d2e | ||
|
bde5bc6ff3 | ||
|
0317cc395b | ||
|
f33fa2d4cc | ||
|
487418974e | ||
|
9bb49fc860 | ||
|
ee1eb58466 | ||
|
e4e7a32e0d | ||
|
40c9d4b1eb | ||
|
3e80e37faf | ||
|
c8f8fdb139 | ||
|
89ba0f67b7 | ||
|
53eba1828d | ||
|
d6364af32c |
@ -1,150 +0,0 @@
|
||||
{
|
||||
"files": [
|
||||
"README.md"
|
||||
],
|
||||
"imageSize": 100,
|
||||
"commit": false,
|
||||
"contributors": [
|
||||
{
|
||||
"login": "iwilltry42",
|
||||
"name": "Thorsten Klein",
|
||||
"avatar_url": "https://avatars3.githubusercontent.com/u/25345277?v=4",
|
||||
"profile": "https://twitter.com/iwilltry42",
|
||||
"contributions": [
|
||||
"code",
|
||||
"doc",
|
||||
"ideas",
|
||||
"maintenance"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "zeerorg",
|
||||
"name": "Rishabh Gupta",
|
||||
"avatar_url": "https://avatars0.githubusercontent.com/u/13547997?v=4",
|
||||
"profile": "https://blog.zeerorg.site/",
|
||||
"contributions": [
|
||||
"ideas",
|
||||
"code"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "louiznk",
|
||||
"name": "Louis Tournayre",
|
||||
"avatar_url": "https://avatars3.githubusercontent.com/u/25585516?v=4",
|
||||
"profile": "http://www.zenika.com",
|
||||
"contributions": [
|
||||
"doc"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "lionelnicolas",
|
||||
"name": "Lionel Nicolas",
|
||||
"avatar_url": "https://avatars3.githubusercontent.com/u/6538664?v=4",
|
||||
"profile": "https://github.com/lionelnicolas",
|
||||
"contributions": [
|
||||
"code"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "toonsevrin",
|
||||
"name": "Toon Sevrin",
|
||||
"avatar_url": "https://avatars1.githubusercontent.com/u/5507199?v=4",
|
||||
"profile": "https://github.com/toonsevrin.keys",
|
||||
"contributions": [
|
||||
"code"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "dhoppe",
|
||||
"name": "Dennis Hoppe",
|
||||
"avatar_url": "https://avatars3.githubusercontent.com/u/1111056?v=4",
|
||||
"profile": "http://debian-solutions.de",
|
||||
"contributions": [
|
||||
"doc",
|
||||
"example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "JohnnyCrazy",
|
||||
"name": "Jonas Dellinger",
|
||||
"avatar_url": "https://avatars0.githubusercontent.com/u/3109892?v=4",
|
||||
"profile": "https://dellinger.dev",
|
||||
"contributions": [
|
||||
"infra"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "markrexwinkel",
|
||||
"name": "markrexwinkel",
|
||||
"avatar_url": "https://avatars2.githubusercontent.com/u/10704814?v=4",
|
||||
"profile": "https://github.com/markrexwinkel",
|
||||
"contributions": [
|
||||
"doc"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "inercia",
|
||||
"name": "Alvaro",
|
||||
"avatar_url": "https://avatars2.githubusercontent.com/u/1841612?v=4",
|
||||
"profile": "http://inerciatech.com/",
|
||||
"contributions": [
|
||||
"code",
|
||||
"ideas",
|
||||
"plugin"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "nunix",
|
||||
"name": "Nuno do Carmo",
|
||||
"avatar_url": "https://avatars2.githubusercontent.com/u/905874?v=4",
|
||||
"profile": "http://wsl.dev",
|
||||
"contributions": [
|
||||
"content",
|
||||
"tutorial",
|
||||
"question"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "erwinkersten",
|
||||
"name": "Erwin Kersten",
|
||||
"avatar_url": "https://avatars0.githubusercontent.com/u/4391121?v=4",
|
||||
"profile": "https://github.com/erwinkersten",
|
||||
"contributions": [
|
||||
"doc"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "searsaw",
|
||||
"name": "Alex Sears",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/3712883?v=4",
|
||||
"profile": "http://www.alexsears.com",
|
||||
"contributions": [
|
||||
"doc"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "Shanduur",
|
||||
"name": "Mateusz Urbanek",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/32583062?v=4",
|
||||
"profile": "http://shanduur.github.io",
|
||||
"contributions": [
|
||||
"code"
|
||||
]
|
||||
},
|
||||
{
|
||||
"login": "benjaminjb",
|
||||
"name": "Benjamin Blattberg",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/4651855?v=4",
|
||||
"profile": "https://github.com/benjaminjb",
|
||||
"contributions": [
|
||||
"code"
|
||||
]
|
||||
}
|
||||
],
|
||||
"contributorsPerLine": 7,
|
||||
"projectName": "k3d",
|
||||
"projectOwner": "rancher",
|
||||
"repoType": "github",
|
||||
"repoHost": "https://github.com",
|
||||
"skipCi": true
|
||||
}
|
@ -1,7 +0,0 @@
|
||||
.github/
|
||||
.local/
|
||||
bin/
|
||||
_dist/
|
||||
tools/
|
||||
proxy/
|
||||
site/
|
431
.drone.yml
431
.drone.yml
@ -1,431 +0,0 @@
|
||||
---
|
||||
###########################################
|
||||
##### k3d CLI/binary release pipeline #####
|
||||
###########################################
|
||||
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: main
|
||||
|
||||
platform:
|
||||
os: linux
|
||||
arch: amd64
|
||||
|
||||
steps:
|
||||
|
||||
- name: lint
|
||||
image: golang:1.17
|
||||
commands:
|
||||
- make ci-setup
|
||||
- make check-fmt lint
|
||||
when:
|
||||
event:
|
||||
- push
|
||||
- pull_request
|
||||
- tag
|
||||
|
||||
- name: test
|
||||
image: docker:20.10
|
||||
volumes:
|
||||
- name: dockersock
|
||||
path: /var/run
|
||||
commands:
|
||||
- apk add git bash curl sudo jq make
|
||||
- sleep 5 # give docker enough time to start
|
||||
- make e2e
|
||||
when:
|
||||
event:
|
||||
- push
|
||||
- pull_request
|
||||
- tag
|
||||
|
||||
- name: build
|
||||
image: golang:1.17
|
||||
environment:
|
||||
GIT_TAG: "${DRONE_TAG}"
|
||||
commands:
|
||||
- make ci-setup
|
||||
- make build-cross
|
||||
depends_on:
|
||||
- lint
|
||||
- test
|
||||
when:
|
||||
event:
|
||||
- push
|
||||
- tag
|
||||
|
||||
- name: pre-release
|
||||
image: plugins/github-release
|
||||
settings:
|
||||
api_key:
|
||||
from_secret: github_token
|
||||
files:
|
||||
- _dist/*
|
||||
checksum:
|
||||
- sha256
|
||||
prerelease: true
|
||||
depends_on:
|
||||
- lint
|
||||
- test
|
||||
- build
|
||||
when:
|
||||
event:
|
||||
- tag
|
||||
ref:
|
||||
include:
|
||||
# include only pre-release tags
|
||||
- "refs/tags/*rc*"
|
||||
- "refs/tags/*beta*"
|
||||
- "refs/tags/*alpha*"
|
||||
- "refs/tags/*test*"
|
||||
- "refs/tags/*dev*"
|
||||
|
||||
- name: release
|
||||
image: plugins/github-release
|
||||
settings:
|
||||
api_key:
|
||||
from_secret: github_token
|
||||
files:
|
||||
- _dist/*
|
||||
checksum:
|
||||
- sha256
|
||||
depends_on:
|
||||
- lint
|
||||
- test
|
||||
- build
|
||||
when:
|
||||
event:
|
||||
- tag
|
||||
ref:
|
||||
exclude:
|
||||
# exclude pre-release tags
|
||||
- "refs/tags/*rc*"
|
||||
- "refs/tags/*beta*"
|
||||
- "refs/tags/*alpha*"
|
||||
- "refs/tags/*test*"
|
||||
- "refs/tags/*dev*"
|
||||
|
||||
services:
|
||||
# Starting the docker service to be used by dind
|
||||
- name: docker
|
||||
image: docker:20.10-dind
|
||||
privileged: true
|
||||
volumes:
|
||||
- name: dockersock
|
||||
path: /var/run
|
||||
|
||||
volumes:
|
||||
- name: dockersock
|
||||
temp: {}
|
||||
|
||||
|
||||
---
|
||||
###########################
|
||||
###### Docker Images ######
|
||||
###########################
|
||||
#
|
||||
# +++ Docker Images +++
|
||||
# Tagged using the auto_tag feature of the docker plugin
|
||||
# See http://plugins.drone.io/drone-plugins/drone-docker/#autotag
|
||||
# > if event type is `tag`
|
||||
# > > 1.0.0 produces docker tags 1, 1.0, 1.0.0
|
||||
# > > 1.0.0-rc.1 produces docker tags 1.0.0-rc.1
|
||||
# > if event type is `push` and target branch == default branch (main)
|
||||
# > > tag `latest`
|
||||
|
||||
|
||||
################################
|
||||
##### Docker Images: amd64 #####
|
||||
################################
|
||||
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: linux_amd64
|
||||
|
||||
platform:
|
||||
os: linux
|
||||
arch: amd64
|
||||
|
||||
steps:
|
||||
|
||||
- name: build_push_binary
|
||||
environment:
|
||||
DOCKER_BUILDKIT: "1"
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-amd64
|
||||
dockerfile: Dockerfile
|
||||
target: binary-only
|
||||
context: .
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
build_args:
|
||||
- GIT_TAG_OVERRIDE=${DRONE_TAG}
|
||||
|
||||
- name: build_push_dind
|
||||
image: plugins/docker
|
||||
environment:
|
||||
DOCKER_BUILDKIT: "1"
|
||||
settings:
|
||||
repo: rancher/k3d
|
||||
auto_tag: true
|
||||
auto_tag_suffix: dind-linux-amd64
|
||||
dockerfile: Dockerfile
|
||||
target: dind
|
||||
context: .
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
build_args:
|
||||
- GIT_TAG_OVERRIDE=${DRONE_TAG}
|
||||
- ARCH=amd64
|
||||
|
||||
- name: build_push_proxy
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d-proxy
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-amd64
|
||||
dockerfile: proxy/Dockerfile
|
||||
context: proxy/
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
|
||||
- name: build_push_tools
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d-tools
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-amd64
|
||||
dockerfile: tools/Dockerfile
|
||||
context: tools/
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
|
||||
trigger:
|
||||
event:
|
||||
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
|
||||
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
|
||||
|
||||
depends_on:
|
||||
- main
|
||||
|
||||
---
|
||||
|
||||
################################
|
||||
##### Docker Images: arm #####
|
||||
################################
|
||||
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: linux_arm
|
||||
|
||||
platform:
|
||||
os: linux
|
||||
arch: arm
|
||||
|
||||
steps:
|
||||
|
||||
- name: build_push_proxy
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d-proxy
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-arm
|
||||
dockerfile: proxy/Dockerfile
|
||||
context: proxy/
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
build_args:
|
||||
- ARCH=arm
|
||||
|
||||
- name: build_push_tools
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d-tools
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-arm
|
||||
dockerfile: tools/Dockerfile
|
||||
context: tools/
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
|
||||
trigger:
|
||||
event:
|
||||
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
|
||||
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
|
||||
|
||||
depends_on:
|
||||
- main
|
||||
|
||||
---
|
||||
|
||||
################################
|
||||
##### Docker Images: arm64 #####
|
||||
################################
|
||||
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: linux_arm64
|
||||
|
||||
platform:
|
||||
os: linux
|
||||
arch: arm64
|
||||
|
||||
steps:
|
||||
|
||||
- name: build_push_binary
|
||||
environment:
|
||||
DOCKER_BUILDKIT: "1"
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-arm64
|
||||
dockerfile: Dockerfile
|
||||
target: binary-only
|
||||
context: .
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
build_args:
|
||||
- GIT_TAG_OVERRIDE=${DRONE_TAG}
|
||||
|
||||
- name: build_push_dind
|
||||
image: plugins/docker
|
||||
environment:
|
||||
DOCKER_BUILDKIT: "1"
|
||||
settings:
|
||||
repo: rancher/k3d
|
||||
auto_tag: true
|
||||
auto_tag_suffix: dind-linux-arm64
|
||||
dockerfile: Dockerfile
|
||||
target: dind
|
||||
context: .
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
build_args:
|
||||
- GIT_TAG_OVERRIDE=${DRONE_TAG}
|
||||
- ARCH=arm64
|
||||
|
||||
- name: build_push_proxy
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d-proxy
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-arm64
|
||||
dockerfile: proxy/Dockerfile
|
||||
context: proxy/
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
build_args:
|
||||
- ARCH=arm64
|
||||
|
||||
- name: build_push_tools
|
||||
image: plugins/docker
|
||||
settings:
|
||||
repo: rancher/k3d-tools
|
||||
auto_tag: true
|
||||
auto_tag_suffix: linux-arm64
|
||||
dockerfile: tools/Dockerfile
|
||||
context: tools/
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
|
||||
trigger:
|
||||
event:
|
||||
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the docker plugin
|
||||
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
|
||||
|
||||
depends_on:
|
||||
- main
|
||||
|
||||
---
|
||||
|
||||
##############################
|
||||
###### Docker Manifests ######
|
||||
##############################
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: manifests
|
||||
|
||||
platform:
|
||||
os: linux
|
||||
arch: amd64
|
||||
|
||||
steps:
|
||||
- name: push_manifest_binary
|
||||
image: plugins/manifest
|
||||
settings:
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
spec: manifest.tmpl
|
||||
auto_tag: true
|
||||
ignore_missing: true # expected, as we dropped arm due to missing base image for that arch
|
||||
|
||||
- name: push_manifest_dind
|
||||
image: plugins/manifest
|
||||
settings:
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
spec: dind-manifest.tmpl
|
||||
auto_tag: true
|
||||
ignore_missing: true # expected, as we dropped arm due to missing base image for that arch
|
||||
|
||||
- name: push_manifest_proxy
|
||||
image: plugins/manifest
|
||||
settings:
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
spec: proxy/manifest.tmpl
|
||||
auto_tag: true
|
||||
ignore_missing: false
|
||||
|
||||
- name: push_manifest_tools
|
||||
image: plugins/manifest
|
||||
settings:
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
spec: tools/manifest.tmpl
|
||||
auto_tag: true
|
||||
ignore_missing: false
|
||||
|
||||
trigger:
|
||||
event:
|
||||
- tag # see note at the start of the "Docker Images" section: creates SemVer tagged images using the `auto_tag` option of the manifest plugin
|
||||
- push # `auto_tag` option only creates the `latest` tag if target branch is default branch (i.e. `main`)
|
||||
|
||||
depends_on:
|
||||
- main
|
||||
- linux_amd64
|
||||
- linux_arm
|
||||
- linux_arm64
|
||||
|
22
.github/ISSUE_TEMPLATE/bug_report.md
vendored
22
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@ -1,38 +1,40 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: "[BUG] "
|
||||
title: "[BUG]"
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
## What did you do
|
||||
### Describe what you did leading up to the issue
|
||||
|
||||
- How was the cluster created?
|
||||
- `k3d cluster create -x A -y B`
|
||||
- `k3d create -x A -y B`
|
||||
|
||||
- What did you do afterwards?
|
||||
- k3d commands?
|
||||
- docker commands?
|
||||
- OS operations (e.g. shutdown/reboot)?
|
||||
|
||||
## What did you expect to happen
|
||||
### Describe what you expected to happen
|
||||
|
||||
Concise description of what you expected to happen after doing what you described above.
|
||||
|
||||
## Screenshots or terminal output
|
||||
### Add screenshots or terminal output
|
||||
|
||||
If applicable, add screenshots or terminal output (code block) to help explain your problem.
|
||||
|
||||
## Which OS & Architecture
|
||||
### Describe your Setup
|
||||
|
||||
#### OS / Architecture
|
||||
|
||||
- Linux, Windows, MacOS / amd64, x86, ...?
|
||||
|
||||
## Which version of `k3d`
|
||||
#### k3d version
|
||||
|
||||
- output of `k3d version`
|
||||
- output of `k3d --version`
|
||||
|
||||
## Which version of docker
|
||||
### docker version
|
||||
|
||||
- output of `docker version` and `docker info`
|
||||
- output of `docker version`
|
||||
|
15
.github/ISSUE_TEMPLATE/feature_request.md
vendored
15
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@ -1,31 +1,30 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: "[FEATURE] "
|
||||
title: "[FEATURE]"
|
||||
labels: enhancement
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
## Is your feature request related to a problem or a Pull Request
|
||||
### Related Issues and/or Pull-Requests
|
||||
|
||||
Please link to the issue/PR here and explain how your request is related to it.
|
||||
|
||||
## Scope of your request
|
||||
### Scope of your request
|
||||
|
||||
Do you need...
|
||||
|
||||
- a new noun (next to e.g. `cluster`, `node`, etc. used via `k3d <noun>`)?
|
||||
- a new verb (next to e.g. `cluster create`, `node start`, etc. used via `k3d <noun> <verb>`)
|
||||
- a new flag for a command (e.g. `k3d cluster create --<your-flag>`)?
|
||||
- a new command (next to e.g. `create`, `delete`, etc. used via `k3d <your-command>`)?
|
||||
- a new flag for a command (e.g. `k3d create --<your-flag>`)?
|
||||
- which command?
|
||||
- different functionality for an existing command/flag
|
||||
- which command or flag?
|
||||
|
||||
## Describe the solution you'd like
|
||||
### Describe the solution you'd like
|
||||
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
## Describe alternatives you've considered
|
||||
### Describe alternatives you've considered
|
||||
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
37
.github/ISSUE_TEMPLATE/help_question.md
vendored
Normal file
37
.github/ISSUE_TEMPLATE/help_question.md
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
---
|
||||
name: Help/Question
|
||||
about: Ask a question or request help for any challenge/issue
|
||||
title: "[HELP/QUESTION] "
|
||||
labels: help, question
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Your Question/Help Request
|
||||
|
||||
What's up?
|
||||
|
||||
### Information for Helpers
|
||||
|
||||
**What did you do?**
|
||||
|
||||
- How was the cluster created?
|
||||
- `k3d create -x A -y B`
|
||||
|
||||
- What did you do afterwards?
|
||||
- k3d commands?
|
||||
- docker commands?
|
||||
- OS operations (e.g. shutdown/reboot)?
|
||||
- kubectl commands?
|
||||
|
||||
**Which OS & Architecture?**
|
||||
|
||||
- Linux, Windows, MacOS / amd64, x86, ...?
|
||||
|
||||
**Which version of `k3d`?**
|
||||
|
||||
- output of `k3d --version`
|
||||
|
||||
**Which version of docker?**
|
||||
|
||||
- output of `docker version`
|
22
.github/ISSUE_TEMPLATE/question_help.md
vendored
22
.github/ISSUE_TEMPLATE/question_help.md
vendored
@ -1,22 +0,0 @@
|
||||
---
|
||||
name: Question or Help Wanted
|
||||
about: Get answers, receive Help.
|
||||
title: "[QUESTION/HELP] "
|
||||
labels: question
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
In general, please consider using GitHub Discussions for questions and general discussions: https://github.com/rancher/k3d/discussions .
|
||||
Especially please use Discussions for questions around use cases for k3d, etc.
|
||||
For everything else, fire away :)
|
||||
-->
|
||||
|
||||
## Question / Where do you need Help?
|
||||
|
||||
|
||||
## Scope of your Question
|
||||
|
||||
- Is your question related to a specific version of k3d (or k3s)?
|
||||
- Please paste the output of `k3d version` here
|
24
.github/pull_request_template.md
vendored
24
.github/pull_request_template.md
vendored
@ -1,24 +0,0 @@
|
||||
<!--
|
||||
Hi there, have an early THANK YOU for your contribution!
|
||||
k3d is a community-driven project, so we really highly appreciate any support.
|
||||
Please make sure, you've read our Code of Conduct and the Contributing Guidelines :)
|
||||
- Code of Conduct: https://github.com/rancher/k3d/blob/main/CODE_OF_CONDUCT.md
|
||||
- Contributing Guidelines: https://github.com/rancher/k3d/blob/main/CONTRIBUTING.md
|
||||
-->
|
||||
|
||||
# What
|
||||
|
||||
<!-- What does this PR do or change? -->
|
||||
|
||||
# Why
|
||||
|
||||
<!-- Link issues, discussions, etc. or just explain why you're creating this PR -->
|
||||
|
||||
# Implications
|
||||
|
||||
<!--
|
||||
Does this change existing behavior? If so, does it affect the CLI (cmd/) only or does it also/only change some internals of the Go module (pkg/)?
|
||||
Especially mention breaking changes here!
|
||||
-->
|
||||
|
||||
<!-- Get recognized using our all-contributors bot: https://github.com/rancher/k3d/blob/main/CONTRIBUTING.md#get-recognized -->
|
24
.github/workflows/aur-prerelease.yml
vendored
24
.github/workflows/aur-prerelease.yml
vendored
@ -1,24 +0,0 @@
|
||||
name: AUR Prerelease
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [prereleased]
|
||||
|
||||
jobs:
|
||||
aur-pre-release:
|
||||
runs-on: ubuntu-20.04
|
||||
container: archlinux:base-20210228.0.16308
|
||||
steps:
|
||||
- name: Checkout Project
|
||||
uses: actions/checkout@v1
|
||||
- name: Publish Pre-Release to AUR
|
||||
run: |
|
||||
# AUR Packages are not allowed to use - in the package version. its used to combine pkgver and pkgrel
|
||||
export COMMIT_REF=${GITHUB_REF/-/_}
|
||||
./deploy-aur.sh
|
||||
env:
|
||||
PACKAGE_NAME: rancher-k3d-beta-bin
|
||||
COMMIT_USERNAME: GitHub Action
|
||||
COMMIT_EMAIL: iwilltry42+k3d@gmail.com
|
||||
COMMIT_MESSAGE: "[CI] Updated to $NEW_RELEASE"
|
||||
SSH_PRIVATE_KEY: ${{ secrets.AUR_PRIVATE_KEY }}
|
23
.github/workflows/aur-release.yml
vendored
23
.github/workflows/aur-release.yml
vendored
@ -1,23 +0,0 @@
|
||||
name: AUR Release
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [released]
|
||||
|
||||
jobs:
|
||||
aur-release:
|
||||
runs-on: ubuntu-20.04
|
||||
container: archlinux:base-20210228.0.16308
|
||||
steps:
|
||||
- name: Checkout Project
|
||||
uses: actions/checkout@v1
|
||||
- name: Publish Release to AUR
|
||||
run: |
|
||||
export COMMIT_REF=$GITHUB_REF
|
||||
./deploy-aur.sh
|
||||
env:
|
||||
PACKAGE_NAME: rancher-k3d-bin
|
||||
COMMIT_USERNAME: GitHub Action
|
||||
COMMIT_EMAIL: iwilltry42+k3d@gmail.com
|
||||
COMMIT_MESSAGE: "[CI] Updated to $NEW_RELEASE"
|
||||
SSH_PRIVATE_KEY: ${{ secrets.AUR_PRIVATE_KEY }}
|
46
.github/workflows/docs.yml
vendored
46
.github/workflows/docs.yml
vendored
@ -1,46 +0,0 @@
|
||||
name: k3d.io
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
# only run on tags for real releases and special docs releases
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+'
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+-docs.[0-9]+'
|
||||
# tags-ignore:
|
||||
# - "*rc*"
|
||||
# - "*beta*"
|
||||
# - "*alpha*"
|
||||
# - "*test*"
|
||||
# - "*dev*"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
container:
|
||||
image: python:3.9
|
||||
steps:
|
||||
- name: Checkout Project
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Requirements
|
||||
run: pip install -r docs/requirements.txt
|
||||
- name: Build with MkDocs (validation)
|
||||
run: |
|
||||
mkdocs build --verbose --clean --strict
|
||||
rm -r site/
|
||||
- name: Configure Git
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
id: git
|
||||
run: |
|
||||
git config --global user.name ghaction-k3d.io
|
||||
git config --global user.email ghaction@k3d.io
|
||||
echo ::set-output name=tag::${GITHUB_REF#refs/tags/}
|
||||
- name: Build & Deploy with Mike (versioned)
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
run: |
|
||||
mike deploy --update-aliases --push --rebase ${{ steps.git.outputs.tag }} stable
|
||||
|
||||
|
8
.gitignore
vendored
8
.gitignore
vendored
@ -6,11 +6,8 @@
|
||||
*.dylib
|
||||
|
||||
# Output folders
|
||||
tools/bin/
|
||||
tools/_dist/
|
||||
bin/
|
||||
_dist/
|
||||
site/
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
@ -21,8 +18,3 @@ site/
|
||||
# Editors
|
||||
.vscode/
|
||||
.local/
|
||||
.idea/
|
||||
*.iml
|
||||
|
||||
# Pipenv
|
||||
Pipfile*
|
@ -1,3 +0,0 @@
|
||||
linters-settings:
|
||||
errcheck:
|
||||
check-blank: false # to keep `_ = viper.BindPFlag(...)` from throwing errors
|
28
.travis.yml
Normal file
28
.travis.yml
Normal file
@ -0,0 +1,28 @@
|
||||
dist: bionic
|
||||
language: go
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
go:
|
||||
- 1.13.x
|
||||
git:
|
||||
depth: 1
|
||||
install: true
|
||||
before_script:
|
||||
- make ci-setup
|
||||
script:
|
||||
- make ci-tests-dind ci-dist
|
||||
deploy:
|
||||
provider: releases
|
||||
skip_cleanup: true
|
||||
api_key:
|
||||
secure: VFb0jmL6t+cuKWAGC5OLeseTJrK3CakD5Laeyp0JVG/o3/cYgHP0lIeDmJJMZL/Luxm5aL8QHsCRfQoMBAnj6Q8hchI9rbcYhFZzuFJYyDxjcrPm0+kM3yiK14lQZNvksy2ZSsO7D63c8x9sQVrEus4idVTUoxrpSXLM2eVjl6W0O2RdZvLsxgaLPwV1ufpihrqbXdEUjt/YSYpHiC5gS3o+FcyMGucJQdN/L7p6jyAqVgg4+t8bdyWj6+MEG4p8lmWhhbGzDo38iMxtCBu+nDHRsbivay3eJZ643VguX0lj62Vt5KUTcVJntmZqQ2UF6FoEVUPOegkrSeoiMuOH1+nYwcsfMFijMkrcFhb6bAisJJd6agdhFWXiSwL88FQkJh0DqeA0tFFIzDbTS/AZTY4Li8bWng3aCBgSXiMzIBf0es+wMDw0gwhfH44Y/RAsKSQJ/Lln00AaVzkOkOWOmu5Ks0CVYDy0M5QDQOCW2E9TIb7WdIMh3aNCkZi+rGovigejJv3vUZqkN03Og07Hbrjgfg28iY3isIt3soOrVqek2hJJFnKjUhhv2OhJm3z6FpTyMViUtSmJ+LTiBjpyiWC4QuaITDadCJTxZQwobhI+18c2Zi5/HjTX1pgD1wk3quv9R4bGjVINenefG6xxaNj+CeFTfrQnnHuXOL50828=
|
||||
file:
|
||||
- _dist/k3d-darwin-amd64
|
||||
- _dist/k3d-linux-386
|
||||
- _dist/k3d-linux-amd64
|
||||
- _dist/k3d-linux-arm
|
||||
- _dist/k3d-linux-arm64
|
||||
- _dist/k3d-windows-amd64.exe
|
||||
on:
|
||||
repo: rancher/k3d
|
||||
tags: true
|
470
CHANGELOG.md
470
CHANGELOG.md
@ -1,470 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
## v5.0.3
|
||||
|
||||
### Enhancements & Fixes
|
||||
|
||||
- simplified way of getting a Docker API Client that works with Docker Contexts and `DOCKER_*` environment variable configuration (#829, @dragonflylee)
|
||||
- fix: didn't honor `DOCKER_TLS` environment variables before
|
||||
|
||||
## v5.0.2
|
||||
|
||||
### Enhancements
|
||||
|
||||
- CoreDNS Configmap is now edited in the auto-deploy manifest on disk instead of relying on `kubectl patch` command (#814)
|
||||
- refactor: add cmd subcommands in a single function call (#819, @moeryomenko)
|
||||
- handle ready-log-messages by type and intent & check them in single log streams instead of checking whole chunks every time (#818)
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix: config file check failing with env var expansion because unexpanded input file was checked
|
||||
|
||||
### Misc
|
||||
|
||||
- cleanup: ensure that connections/streams are closed once unused (#818)
|
||||
- cleanup: split type definitions across multiple files to increase readability (#818)
|
||||
- docs: clarify `node create` help text about cluster reference (#808, @losinggeneration)
|
||||
- refactor: move from io/ioutil (deprecated) to io and os packages (#827, @Juneezee)
|
||||
|
||||
## v5.0.1
|
||||
|
||||
### Enhancement
|
||||
|
||||
- add `HostFromClusterNetwork` field to `LocalRegistryHosting` configmap as per KEP-1755 (#754)
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix: nilpointer exception on failed exec process with no returned logreader
|
||||
- make post-create cluster preparation (DNS stuff mostly) more resilient (#780)
|
||||
- fix v1alpha2 -> v1alpha3 config migration (and other related issues) (#799)
|
||||
|
||||
### Misc
|
||||
|
||||
- docs: fix typo (#784)
|
||||
- docs: fix usage of legacy `--k3s-agent/server-arg` flag
|
||||
|
||||
## v5.0.0
|
||||
|
||||
This release contains a whole lot of new features, breaking changes as well as smaller fixes and improvements.
|
||||
The changelog shown here is likely not complete but gives a broad overview over the changes.
|
||||
For more details, please check the v5 milestone (<https://github.com/rancher/k3d/milestone/27>) or even the commit history.
|
||||
The docs have been updated, so you should also find the information you need there, with more to come!
|
||||
|
||||
The demo repository has also been updated to work with k3d v5: <https://github.com/iwilltry42/k3d-demo>.
|
||||
|
||||
**Info**: <https://k3d.io> is now versioned, so you can checkout different versions of the documentation by using the dropdown menu in the page title bar!
|
||||
|
||||
**Feedback welcome!**
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- new syntax for nodefilters
|
||||
- dropped the usage of square brackets `[]` for indexing, as it caused problems with some shells trying to interpret them
|
||||
- new syntax: `@identifier[:index][:opt]` (see <https://github.com/rancher/k3d/discussions/652>)
|
||||
- example for a port-mapping: `--port 8080:80@server:0:proxy`
|
||||
- identifier = `server`, index = `0`, opt = `proxy`
|
||||
- `opt` is an extra optional argument used for different purposes depending on the flag
|
||||
- currently, only the `--port` flag has `opt`s, namely `proxy` and `direct` (see other breaking change)
|
||||
- port-mapping now go via the loadbalancer (serverlb) by default
|
||||
- the `--port` flag has the `proxy` opt (see new nodefilter syntax above) set by default
|
||||
- to leverage the old behavior of direct port-mappings, use the `direct` opt on the port flag
|
||||
- the nodefilter `loadbalancer` will now do the same as `servers:*;agents:*` (proxied via the loadbalancer)
|
||||
- flag `--registries-create` transformed from bool flag to string flag: let's you define the name and port-binding of the newly created registry, e.g. `--registry-create myregistry.localhost:5001`
|
||||
|
||||
### Fixes
|
||||
|
||||
- cleaned up and properly sorted the sanitization of existing resources used to create new nodes (#638)
|
||||
|
||||
### Features & Enhancements
|
||||
|
||||
- new command: `k3d node edit` to edit existing nodes (#615)
|
||||
- currently only allows `k3d node edit NODE --port-add HOSTPORT:CONTAINERPORT` for the serverlb/loadbalancer to add new ports
|
||||
- pkg: new `NodeEdit` function
|
||||
- new (hidden) command: `k3d debug` with some options for debugging k3d resources (#638)
|
||||
- e.g. `k3d debug loadbalancer get-config` to get the current loadbalancer configuration
|
||||
- loadbalancer / k3d-proxy (#638)
|
||||
- updated fork of `confd` to make usage of the file backend including a file watcher for auto-reloads
|
||||
- this also checks the config before applying it, so the lb doesn't crash on a faulty config
|
||||
- updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards
|
||||
- some settings of the loadbalancer can now be configured using `--lb-config-override`, see docs at <https://k3d.io/v5.0.0/design/defaults/#k3d-loadbalancer>
|
||||
- helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638)
|
||||
- concurrently add new nodes to an existing cluster (remove some dumb code) (#640)
|
||||
- `--wait` is now the default for `k3d node create`
|
||||
- normalized flag usage for k3s and runtime (#598, @ejose19)
|
||||
- rename `k3d cluster create --label` to `k3d cluster create --runtime-label` (as it's labelling the node on runtime level, e.g. docker)
|
||||
- config option moved to `options.runtime.labels`
|
||||
- add `k3d cluster create --k3s-node-label` to add Kubernetes node labels via k3s flag (#584, @developer-guy, @ejose, @dentrax)
|
||||
- new config option `options.k3s.nodeLabels`
|
||||
- the same for `k3d node create`
|
||||
- improved config file handling (#605)
|
||||
- new version `v1alpha3`
|
||||
- warning when using outdated version
|
||||
- validation dynamically based on provided config apiVersion
|
||||
- new default for `k3d config init`
|
||||
- new command `k3d config migrate INPUT [OUTPUT]` to migrate config files between versions
|
||||
- currently supported migration `v1alpha2` -> `v1alpha3`
|
||||
- pkg: new `Config` interface type to support new generic `FromViper` config file parsing
|
||||
- changed flags `--k3s-server-arg` & `--k3s-agent-arg` into `--k3s-arg` with nodefilter support (#605)
|
||||
- new config path `options.k3s.extraArgs`
|
||||
- config file: environment variables (`$VAR`, `${VAR}` will be expanded unconditionally) (#643)
|
||||
- docker context support (#601, @developer-guy & #674)
|
||||
- Feature flag using the environment variable `K3D_FIX_DNS` and setting it to a true value (e.g. `export K3D_FIX_DNS=1`) to forward DNS queries to your local machine, e.g. to use your local company DNS
|
||||
|
||||
### Misc
|
||||
|
||||
- tests/e2e: timeouts everywhere to avoid killing DroneCI (#638)
|
||||
- logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640)
|
||||
- tests/e2e: add tests for v1alpha2 to v1alpha3 migration
|
||||
- docs: use v1alpha3 config version
|
||||
- docs: update general appearance and cleanup
|
||||
|
||||
## v4.4.8
|
||||
|
||||
## Enhancements
|
||||
|
||||
- Improved DroneCI Pipeline for Multiarch Images and SemVer Tags (#712)
|
||||
- **Important**: New images will not have the `v` prefix in the tag anymore!
|
||||
- but now real releases will use the "hierarchical" SemVer tags, so you could e.g. subscribe to rancher/k3d-proxy:4 to get v4.x.x images for the proxy container
|
||||
|
||||
## Fixes
|
||||
|
||||
- clusterCreate: do not override hostIP if hostPort is missing (#693, @lukaszo)
|
||||
- imageImport: import all listed images, not only the first one (#701, @mszostok)
|
||||
- clusterCreate: when memory constraints are set, only pull the image used for checking the edac folder, if it's not present on the machine
|
||||
- fix: update k3d-tools dependencies and use API Version Negotiation, so it still works with older versions of the Docker Engine (#679)
|
||||
|
||||
### Misc
|
||||
|
||||
- install script: add darwin/arm64 support (#676, @colelawrence)
|
||||
- docs: fix go install command (#677, @Rots)
|
||||
- docs: add project overview (<https://k3d.io/internals/project/>) (#680)
|
||||
|
||||
## v4.4.7
|
||||
|
||||
### Features / Enhancements
|
||||
|
||||
- new flag: `k3d image import --keep-tools` to not delete the tools node container after importing the image(s) (#672)
|
||||
- improve image name handling when importing images (#653, @cimnine)
|
||||
- normalize image names internally, e.g. strip prefixes that docker adds, but that break the process
|
||||
- see <https://k3d.io/usage/commands/k3d_image_import/> for more info
|
||||
|
||||
### Fixes
|
||||
|
||||
- Use default gateway, when bridge network doesn't have it (#666, @kuritka)
|
||||
- Start an existing, but not running tools node to re-use it when importing an image (#672)
|
||||
|
||||
### Misc
|
||||
|
||||
- deps: switching back to upstream viper including the StringArray fix
|
||||
- docs: reference to "nolar/setup-k3d-k3s" step for GitHub Actions (#668, @nolar)
|
||||
- docs: updated and simplified CUDA guide (#662, @vainkop) (#669)
|
||||
|
||||
## v4.4.6
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix an issue where the cluster creation would stall waiting for the `starting worker processes` log message from the loadbalancer/serverlb
|
||||
- this was likely caused by a rounding issue when asking docker to get the container logs starting at a specific timestamp
|
||||
- we now drop subsecond precision for this to avoid the rounding issue, which was confirmed to work
|
||||
- see issues #592 & #621
|
||||
|
||||
### Misc
|
||||
|
||||
- to debug the issue mentioned above, we introduced a new environment variable `K3D_LOG_NODE_WAIT_LOGS`, which can be set to a list of node roles (e.g. `K3D_LOG_NODE_WAIT_LOGS=loadbalancer,agent`) to output the container logs that k3d inspects
|
||||
|
||||
## v4.4.5
|
||||
|
||||
### Fixes
|
||||
|
||||
- overall: use the getDockerClient helper function everywhere to e.g. support docker via ssh everywhere
|
||||
- nodeCreate: do not copy meminfo/edac volume mounts from existing nodes, to avoid conflicts with generated mounts
|
||||
- kubeconfig: fix file handling on windows (#626 + #628, @dragonflylee)
|
||||
|
||||
### Misc
|
||||
|
||||
- docs: add [FAQ entry](https://k3d.io/faq/faq/#nodes-fail-to-start-or-get-stuck-in-notready-state-with-log-nf_conntrack_max-permission-denied) on nf_conntrack_max: permission denied issue from kube-proxy (#607)
|
||||
- docs: cleanup, fix formatting, etc.
|
||||
- license: update to include 2021 in time range
|
||||
- docs: link to AutoK3s (#614, @JacieChao)
|
||||
- tests/e2e: update the list of tested k3s versions
|
||||
|
||||
## v4.4.4
|
||||
|
||||
### Enhancements
|
||||
|
||||
- nodes created via `k3d node create` now inherit the registry config from existing nodes (if there is any) (#597)
|
||||
- the cgroupv2 hotfix (custom entrypoint script) is now enabled by default (#603)
|
||||
- disable by setting the environment variable `K3D_FIX_CGROUPV2=false`
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix using networks without IPAM config (e.g. `host`)
|
||||
|
||||
### Misc
|
||||
|
||||
- docs: edit links on k3d.io now point to the correct branch (`main`)
|
||||
- docs: new FAQ entry on spurious PID entries when using shared mounts (#609, @leelavg)
|
||||
|
||||
## v4.4.3
|
||||
|
||||
### Highlights
|
||||
|
||||
- cgroupv2 support: to properly work on cgroupv2 systems, k3s has to move all the processes from the root cgroup to a new /init cgroup and enable subtree_control
|
||||
- this is going to be included in the k3s agent code directly (<https://github.com/k3s-io/k3s/pull/3242>)
|
||||
- for now we're overriding the container entrypoint with a script that does this (#579, compare <https://github.com/k3s-io/k3s/pull/3237>)
|
||||
- thanks a lot for all the input and support @AkihiroSuda
|
||||
- **Usage**: set the environment variable `K3D_FIX_CGROUPV2` to a `true` value before/when creating a cluster with k3d
|
||||
- e.g. `export K3D_FIX_CGROUPV2=1`
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix: docker volume not mountable due to validation failure
|
||||
- was not able to mount named volume on windows as we're checking for `:` meant for drive-letters and k3d separators
|
||||
|
||||
### Misc
|
||||
|
||||
- fix create command's flags typo (#568, @Jason-ZW)
|
||||
|
||||
## v4.4.2
|
||||
|
||||
### Fixes
|
||||
|
||||
- k3d-proxy: rename udp upstreams to avoid collisions/duplicates (#564)
|
||||
|
||||
### Features
|
||||
|
||||
- add *hidden* command `k3d runtime-info` used for debugging (#553)
|
||||
- this comes with some additions on package/runtime level
|
||||
- add *experimental* `--subnet` flag to get some k3d IPAM to ensure that server nodes keep static IPs across restarts (#560)
|
||||
|
||||
### Misc
|
||||
|
||||
- docs: fix typo (#556, @gcalmettes)
|
||||
- docs: fix typo (#561, @alechartung)
|
||||
- ci/drone: pre-release on `-dev.X` tags
|
||||
- ci/drone: always build no matter the branch name (just not release)
|
||||
- docs: add automatic command tree generation via cobra (#562)
|
||||
- makefile: use `go env gopath` as install target for tools (as per #445)
|
||||
- JSONSchema: add some examples and defaults (now also available via <https://raw.githubusercontent.com/rancher/k3d/main/pkg/config/v1alpha2/schema.json> in your IDE)
|
||||
|
||||
## v4.4.1
|
||||
|
||||
### Fixes
|
||||
|
||||
- use viper fork that contains a fix to make cobra's `StringArray` flags work properly
|
||||
- this fixes the issue, that flag values containing commas got split (because we had to use `StringSlice` type flags)
|
||||
- this is to be changed back to upstream viper as soon as <https://github.com/spf13/viper/pull/398> (or a similar fix) got merged
|
||||
|
||||
## v4.4.0
|
||||
|
||||
### Features / Enhancements
|
||||
|
||||
- Support for Memory Limits using e.g. `--servers-memory 1g` or `--agents-memory 1.5g` (#494, @konradmalik)
|
||||
- enabled by providing fake `meminfo` files
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix absolute paths in volume mounts on Windows (#510, @markrexwinkel)
|
||||
|
||||
### Documentation
|
||||
|
||||
- clarify registry names in docs and help text
|
||||
- add usage section about config file (#534)
|
||||
- add FAQ entry on certificate error when running behind corporate proxy
|
||||
- add MacPorts install instructions (#539, @herbygillot)
|
||||
- Heal Shruggie: Replace amputated arm (#540, @claycooper)
|
||||
|
||||
## v4.3.0
|
||||
|
||||
### Features / Enhancements
|
||||
|
||||
- Use Go 1.16
|
||||
- update dependencies, including kubernetes, docker, containerd and more
|
||||
- add `darwin/arm64` (Apple Silicon, M1) build target (#530)
|
||||
- use the new `//go:embed` feature to directly embed the jsonschema in the binary (#529)
|
||||
- Add a status column to `k3d registry list` output (#496, @ebr)
|
||||
- Allow non-prefixed (i.e. without `k3d-` prefix) user input when fetching resources (e.g. `k3d node get mycluster-server-0` would return successfully)
|
||||
|
||||
### Fixes
|
||||
|
||||
- Allow absolute paths for volumes on Windows (#510, @markrexwinkel)
|
||||
- fix nil-pointer exception in case of non-existent IPAM network config
|
||||
- Properly handle combinations of host/hostIP in kubeAPI settings reflected in the kubeconfig (#500, @fabricev)
|
||||
|
||||
### Misc
|
||||
|
||||
- docs: fix typo in stop command help text (#513, @searsaw)
|
||||
- ci/ghaction: AUR (pre-)release now on Ubuntu 20.04 and latest archlinux image
|
||||
- REMOVE incomplete and unused `containerd` runtime from codebase, as it was causing issues to build for windows and hasn't made any progress in quite some time now
|
||||
|
||||
## v4.2.0
|
||||
|
||||
### Features / Enhancements
|
||||
|
||||
- add processing step for cluster config, to configure it e.g. for hostnetwork mode (#477, @konradmalik)
|
||||
- allow proxying UDP ports via the load balancer (#488, @k0da)
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix usage of `DOCKER_HOST` env var for Kubeconfig server ref (trim port)
|
||||
- fix error when trying to attach the same node (e.g. registry) to the same network twice (#486, @kuritka)
|
||||
- fix Kube-API settings in configg file got overwritten (#490, @dtomasi)
|
||||
|
||||
### Misc
|
||||
|
||||
- add `k3d.version` label to created resources
|
||||
- add Pull-Request template
|
||||
- docs: add hint on minimal requirements for multi-server clusters (#481, @Filius-Patris)
|
||||
|
||||
## v4.1.1
|
||||
|
||||
### Fixes
|
||||
|
||||
- fix: `--k3s-server-arg` and `--k3s-agent-arg` didn't work (Viper StringArray incompatibility) (#482)
|
||||
|
||||
## v4.1.0
|
||||
|
||||
### Highlights
|
||||
|
||||
#### :scroll: Configuration Enhancements
|
||||
|
||||
- :snake: use [viper](https://github.com/spf13/viper) for configuration management
|
||||
- takes over the job of properly fetching and merging config options from
|
||||
- CLI arguments/flags
|
||||
- environment variables
|
||||
- config file
|
||||
- this also fixes some issues with using the config file (like cobra defaults overriding config file values)
|
||||
- :heavy_check_mark: add JSON-Schema validation for the `Simple` config file schema
|
||||
- :new: config version `k3d.io/v1alpha2` (some naming changes)
|
||||
- `exposeAPI` -> `kubeAPI`
|
||||
- `options.k3d.noRollback` -> `options.k3d.disableRollback`
|
||||
- `options.k3d.prepDisableHostIPInjection` -> `options.k3d.disableHostIPInjection`
|
||||
|
||||
#### :computer: Docker over SSH
|
||||
|
||||
- Support Docker over SSH (#324, @ekristen & @inercia)
|
||||
|
||||
### Features & Enhancements
|
||||
|
||||
- add root flag `--timestamps` to enable timestamped logs
|
||||
- improved multi-server cluster support (#467)
|
||||
- log a warning, if one tries to create a cluster with only 2 nodes (no majority possible, no fault tolerance)
|
||||
- revamped cluster start procedure: init-node, sorted servers, agents, helpers
|
||||
- different log messages per role and start-place (that we wait for to consider a node to be ready)
|
||||
- module: `NodeStartOpts` now accept a `ReadyLogMessage` and `NodeState` now takes a `Started` timestamp string
|
||||
|
||||
### Fixes
|
||||
|
||||
- do not ignore `--no-hostip` flag and don't inject hostip if `--network=host` (#471, @konradmalik)
|
||||
- fix: `--no-lb` ignored
|
||||
- fix: print error cause when serverlb fails to start
|
||||
|
||||
### Misc
|
||||
|
||||
- tests/e2e: add config override test
|
||||
- tests/e2e: add multi server start-stop cycle test
|
||||
- tests/e2e: improved logs with stage and test details.
|
||||
- builds&tests: use Docker 20.10 and BuildKit everywhere
|
||||
- :memo: docs: add <https://github.com/AbsaOSS/k3d-action> (GitHub Action) as a related project (#476, @kuritka)
|
||||
|
||||
### Tested with
|
||||
|
||||
- E2E Tests ran with k3s versions
|
||||
- v1.17.17-k3s1 (see Known Issues below)
|
||||
- v1.18.15-k3s1 (see Known Issues below)
|
||||
- v1.19.7-k3s1
|
||||
- v1.20.2-k3s1
|
||||
|
||||
### Known Issues
|
||||
|
||||
- automatic multi-server cluster restarts tend to fail with k3s versions v1.17.x & v1.18.x and probably earlier versions (using dqlite)
|
||||
- Using Viper brings us lots of nice features, but also one problem:
|
||||
- We had to switch StringArray flags to StringSlice flags, which
|
||||
- allow to use multiple flag values comma-separated in a single flag, but also
|
||||
- split flag values that contain a comma into separate parts (and we cannot handle issues that arise due to this)
|
||||
- so if you rely on commas in your flag values (e.g. for `--env X=a,b,c`), please consider filing an issue or supporting <https://github.com/spf13/viper/issues/246> and <https://github.com/spf13/viper/pull/398>
|
||||
- `--env X=a,b,c` would be treated the same as `--env X=a`, `--env b`, `--env c`
|
||||
|
||||
## v4.0.0
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
#### Module
|
||||
|
||||
**If you're using k3d as a Go module, please have a look into the code to see all the changes!**
|
||||
|
||||
- We're open for chats via Slack or GitHub discussions
|
||||
|
||||
- Module is now on `github.com/rancher/k3d/v4` due to lots of breaking changes
|
||||
- `pkg/cluster` is now `pkg/client`
|
||||
- `ClusterCreate` and `NodeCreate` don't start the entities (containers) anymore
|
||||
- `ClusterRun` and `NodeRun` orchestrate the new Create and Start functionality
|
||||
- `NodeDelete`/`ClusterDelete` now take an additional `NodeDeleteOpts`/`ClusterDeleteOpts` struct to toggle specific steps
|
||||
- NodeSpec now features a list of networks (required for registries)
|
||||
- New config flow: CLIConfig (SimpleConfig) -> ClusterConfig -> Cluster + Opts
|
||||
|
||||
#### CLI
|
||||
|
||||
- Some flags changed to also use `noun-action` syntax
|
||||
- e.g. `--switch-context --update-default-kubeconfig` -> `--kubeconfig-switch-context --kubeconfig-update-default`
|
||||
- this eases grouping and visibility
|
||||
|
||||
### Changes
|
||||
|
||||
#### Features
|
||||
|
||||
- **Registry Support**
|
||||
- k3d-managed registry like we had it in k3d v1.x
|
||||
- Option 1: default settings, paired with cluster creation
|
||||
- `k3d cluster create --registry-create` -> New registry for that cluster
|
||||
- `k3d cluster create --registry-use` -> Re-use existing registry
|
||||
- Option 2: customized, managed stand-alone
|
||||
- `k3d registry [create/start/stop/delete]`
|
||||
- Check the documentation, help text and tutorials for more details
|
||||
- Communicate managed registry using the LocalRegistryHostingV1 spec from [KEP-1755](https://github.com/kubernetes/enhancements/blob/0d69f7cea6fbe73a7d70fab569c6898f5ccb7be0/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry/README.md)
|
||||
- interesting especially for tools that reload images, like Tilt or Skaffold
|
||||
|
||||
- **Config File Support**
|
||||
- Put all your CLI-Arguments/Flags into a more readable config file and re-use it everywhere (keep it in your repo)
|
||||
- Note: this is not always a 1:1 matching in naming/syntax/semantics
|
||||
- `k3d cluster create --config myconfig.yaml`
|
||||
|
||||
```yaml
|
||||
apiVersion: k3d.io/v1alpha1
|
||||
kind: Simple
|
||||
name: mycluster
|
||||
servers: 3
|
||||
agents: 2
|
||||
ports:
|
||||
- port: 8080:80
|
||||
nodeFilters:
|
||||
- loadbalancer
|
||||
```
|
||||
|
||||
- Check out our test cases in [pkg/config/test_assets/](./pkg/config/test_assets/) for more config file examples
|
||||
- **Note**: The config file format (& feature) might still be a little rough around the edges and it's prone to change quickly until we hit a stable release of the config
|
||||
|
||||
- [WIP] Support for Lifecycle Hooks
|
||||
- Run any executable at specific stages during the cluster and node lifecycles
|
||||
- e.g. we modify the `registries.yaml` in the `preStart` stage of nodes
|
||||
- Guides will follow
|
||||
|
||||
- Print container creation time (#431, @inercia)
|
||||
- add output formats for `cluster ls` and `node ls` (#439, @inercia)
|
||||
|
||||
#### Fixes
|
||||
|
||||
- import image: avoid nil pointer exception in specific cases
|
||||
- cluster delete: properly handle node and network (#437)
|
||||
- --port: fix bnil-pointer exception when exposing port on non-existent loadbalancer
|
||||
- completion/zsh: source completion file
|
||||
|
||||
#### Misc
|
||||
|
||||
- Now building with Go 1.15
|
||||
- same for the k3d-tools code
|
||||
- updated dependencies (including Docker v20.10)
|
||||
- tests/e2e: add `E2E_INCLUDE` and rename `E2E_SKIP` to `E2E_EXCLUDE`
|
||||
- tests/e2e: allow overriding the Helper Image Tag via `E2E_HELPER_IMAGE_TAG`
|
||||
- docs: spell checking (#434, @jsoref)
|
||||
- docs: add Chocolatey install option (#443, @erwinkersten)
|
@ -1,128 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
iwilltry42@gmail.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
<https://www.contributor-covenant.org/version/2/0/code_of_conduct.html>.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
<https://www.contributor-covenant.org/faq>. Translations are available at
|
||||
<https://www.contributor-covenant.org/translations>.
|
@ -1,27 +0,0 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
Hi there! Welcome to the k3d and Rancher Community!
|
||||
We welcome everyone who likes to use and improve our software.
|
||||
|
||||
## Getting Started
|
||||
|
||||
Before starting to work with and on k3d, please read and understand our [**Code of Conduct**](./CODE_OF_CONDUCT.md).
|
||||
|
||||
Get an Overview of the k3d project in the documentation: [k3d.io/internals/project](https://k3d.io/internals/project)
|
||||
|
||||
Before opening an issue or a Pull-Request, please use GitHub's search function to check whether something similar is already in process and hook in there instead.
|
||||
|
||||
## Get Recognized
|
||||
|
||||
We want to foster a collaborative environment, where every contribution is welcomed and recognized.
|
||||
|
||||
If you want to show up in our Contributors section, please make use of the @all-contributors bot integrated with this repository.
|
||||
|
||||
Here's a full guide on using the bot: <https://allcontributors.org/docs/en/bot/usage>.
|
||||
The simplest way to use it is (in a comment on an issue or a pull-request): `@all-contributors please add <username> for <contributions>`, where `<contributions>` is a [list of contributions](https://allcontributors.org/docs/en/emoji-key).
|
||||
|
||||
Here's an [example comment on a PR](https://github.com/rancher/k3d/pull/368#issuecomment-704320376) to tell the bot to add @zeerorg (who had the initial idea for k3s in docker) to the list of contributors for the ideas and code he added:
|
||||
|
||||
> @all-contributors please add @zeerorg for ideas and code
|
||||
|
||||
The bot will open a PR to add the user to the list and posts the link as a follow-up comment on the issue/PR.
|
44
Dockerfile
44
Dockerfile
@ -1,41 +1,13 @@
|
||||
############################################################
|
||||
# builder #
|
||||
# -> golang image used solely for building the k3d binary #
|
||||
# -> built executable can then be copied into other stages #
|
||||
############################################################
|
||||
FROM golang:1.17 as builder
|
||||
ARG GIT_TAG_OVERRIDE
|
||||
FROM golang:1.13 as builder
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version
|
||||
RUN make build && bin/k3d --version
|
||||
|
||||
#######################################################
|
||||
# dind #
|
||||
# -> k3d + some tools in a docker-in-docker container #
|
||||
# -> used e.g. in our CI pipelines for testing #
|
||||
#######################################################
|
||||
FROM docker:20.10-dind as dind
|
||||
ARG OS=linux
|
||||
ARG ARCH=amd64
|
||||
FROM docker:19.03-dind
|
||||
|
||||
# install some basic packages needed for testing, etc.
|
||||
RUN echo "building for ${OS}/${ARCH}" && \
|
||||
apk update && \
|
||||
apk add bash curl sudo jq git make netcat-openbsd
|
||||
|
||||
# install kubectl to interact with the k3d cluster
|
||||
RUN curl -L https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/${OS}/${ARCH}/kubectl -o /usr/local/bin/kubectl && \
|
||||
chmod +x /usr/local/bin/kubectl
|
||||
|
||||
# install yq (yaml processor) from source, as the busybox yq had some issues
|
||||
RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_${OS}_${ARCH} -o /usr/bin/yq &&\
|
||||
chmod +x /usr/bin/yq
|
||||
# TODO: we could create a different stage for e2e tests
|
||||
RUN apk add bash curl sudo
|
||||
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \
|
||||
chmod +x ./kubectl && \
|
||||
mv ./kubectl /usr/local/bin/kubectl
|
||||
COPY --from=builder /app/bin/k3d /bin/k3d
|
||||
|
||||
#########################################
|
||||
# binary-only #
|
||||
# -> only the k3d binary.. nothing else #
|
||||
#########################################
|
||||
FROM scratch as binary-only
|
||||
COPY --from=builder /app/bin/k3d /bin/k3d
|
||||
ENTRYPOINT ["/bin/k3d"]
|
||||
|
12
LICENSE
12
LICENSE
@ -1,6 +1,6 @@
|
||||
The MIT License (MIT)
|
||||
MIT License
|
||||
|
||||
Copyright © 2019-2021 Thorsten Klein <iwilltry42@gmail.com>
|
||||
Copyright (c) 2019 Thorsten Klein <iwilltry42@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
190
Makefile
190
Makefile
@ -1,142 +1,78 @@
|
||||
###################################
|
||||
# #
|
||||
# CONFIGURATION #
|
||||
# #
|
||||
###################################
|
||||
|
||||
########## Shell/Terminal Settings ##########
|
||||
SHELL := /bin/bash
|
||||
|
||||
# determine if make is being executed from interactive terminal
|
||||
INTERACTIVE:=$(shell [ -t 0 ] && echo 1)
|
||||
|
||||
# Use Go Modules for everything
|
||||
export GO111MODULE=on
|
||||
|
||||
########## Tags ##########
|
||||
# Build targets
|
||||
TARGETS ?= darwin/amd64 linux/amd64 linux/386 linux/arm linux/arm64 windows/amd64
|
||||
TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-386.tar.gz linux-386.tar.gz.sha256 linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm64.tar.gz linux-arm64.tar.gz.sha256 windows-amd64.zip windows-amd64.zip.sha256
|
||||
|
||||
# get git tag
|
||||
ifneq ($(GIT_TAG_OVERRIDE),)
|
||||
$(info GIT_TAG set from env override!)
|
||||
GIT_TAG := $(GIT_TAG_OVERRIDE)
|
||||
endif
|
||||
|
||||
GIT_TAG ?= $(shell git describe --tags)
|
||||
GIT_TAG := $(shell git describe --tags)
|
||||
ifeq ($(GIT_TAG),)
|
||||
GIT_TAG := $(shell git describe --always)
|
||||
endif
|
||||
|
||||
# Docker image tag derived from Git tag (with prefix "v" stripped off)
|
||||
K3D_IMAGE_TAG := $(GIT_TAG:v%=%)
|
||||
|
||||
# get latest k3s version: grep the tag and replace + with - (difference between git and dockerhub tags)
|
||||
K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable" | egrep -o '/v[^ ]+"' | sed -E 's/\/|\"//g' | sed -E 's/\+/\-/')
|
||||
|
||||
# get latest k3s version: grep the tag JSON field, extract the tag and replace + with - (difference between git and dockerhub tags)
|
||||
K3S_TAG := $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed -E 's/\+/\-/')
|
||||
ifeq ($(K3S_TAG),)
|
||||
$(warning K3S_TAG undefined: couldn't get latest k3s image tag!)
|
||||
$(warning Output of curl: $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable"))
|
||||
$(warning Output of curl: $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest"))
|
||||
$(error exiting)
|
||||
endif
|
||||
|
||||
########## Source Options ##########
|
||||
# DIRS defines a single level directly, we only look at *.go in this directory.
|
||||
# REC_DIRS defines a source code tree. All go files are analyzed recursively.
|
||||
DIRS := .
|
||||
REC_DIRS := cmd
|
||||
|
||||
########## Test Settings ##########
|
||||
E2E_LOG_LEVEL ?= WARN
|
||||
E2E_INCLUDE ?=
|
||||
E2E_EXCLUDE ?=
|
||||
E2E_EXTRA ?=
|
||||
E2E_RUNNER_START_TIMEOUT ?= 10
|
||||
E2E_HELPER_IMAGE_TAG ?=
|
||||
|
||||
########## Go Build Options ##########
|
||||
# Build targets
|
||||
TARGETS ?= darwin/amd64 darwin/arm64 linux/amd64 linux/386 linux/arm linux/arm64 windows/amd64
|
||||
TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 darwin-arm64.tar.gz darwin-arm64.tar.gz.sha256 linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-386.tar.gz linux-386.tar.gz.sha256 linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm64.tar.gz linux-arm64.tar.gz.sha256 windows-amd64.zip windows-amd64.zip.sha256
|
||||
K3D_HELPER_VERSION ?=
|
||||
# determine if make is being executed from interactive terminal
|
||||
INTERACTIVE:=$(shell [ -t 0 ] && echo 1)
|
||||
|
||||
# Go options
|
||||
GO ?= go
|
||||
GOENVPATH := $(shell go env GOPATH)
|
||||
PKG := $(shell go mod vendor)
|
||||
TAGS :=
|
||||
TESTS := ./...
|
||||
TESTS := .
|
||||
TESTFLAGS :=
|
||||
LDFLAGS := -w -s -X github.com/rancher/k3d/v5/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v5/version.K3sVersion=${K3S_TAG}
|
||||
GCFLAGS :=
|
||||
LDFLAGS := -w -s -X github.com/rancher/k3d/version.Version=${GIT_TAG} -X github.com/rancher/k3d/version.K3sVersion=${K3S_TAG}
|
||||
GOFLAGS :=
|
||||
BINDIR := $(CURDIR)/bin
|
||||
BINARIES := k3d
|
||||
|
||||
# Set version of the k3d helper images for build
|
||||
ifneq ($(K3D_HELPER_VERSION),)
|
||||
$(info [INFO] Helper Image version set to ${K3D_HELPER_VERSION})
|
||||
LDFLAGS += -X github.com/rancher/k3d/v5/version.HelperVersionOverride=${K3D_HELPER_VERSION}
|
||||
endif
|
||||
K3D_IMAGE_TAG := $(GIT_TAG)
|
||||
|
||||
# Go Package required
|
||||
PKG_GOX := github.com/mitchellh/gox@v1.0.1
|
||||
PKG_GOLANGCI_LINT_VERSION := 1.23.8
|
||||
PKG_GOLANGCI_LINT_SCRIPT := https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
|
||||
PKG_GOLANGCI_LINT := github.com/golangci/golangci-lint/cmd/golangci-lint@v${PKG_GOLANGCI_LINT_VERSION}
|
||||
|
||||
# configuration adjustments for golangci-lint
|
||||
GOLANGCI_LINT_DISABLED_LINTERS := ""
|
||||
|
||||
# Use Go Modules for everything
|
||||
export GO111MODULE=on
|
||||
|
||||
# go source directories.
|
||||
# DIRS defines a single level directly, we only look at *.go in this directory.
|
||||
# REC_DIRS defines a source code tree. All go files are analyzed recursively.
|
||||
DIRS := .
|
||||
REC_DIRS := cli
|
||||
|
||||
# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
|
||||
GO_SRC := $(foreach dir,$(DIRS),$(wildcard $(dir)/*.go))
|
||||
GO_SRC += $(foreach dir,$(REC_DIRS),$(shell find $(dir) -name "*.go"))
|
||||
|
||||
########## Required Tools ##########
|
||||
# Go Package required
|
||||
PKG_GOX := github.com/mitchellh/gox@v1.0.1
|
||||
PKG_GOLANGCI_LINT_VERSION := 1.39.0
|
||||
PKG_GOLANGCI_LINT_SCRIPT := https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
|
||||
PKG_GOLANGCI_LINT := github.com/golangci/golangci-lint/cmd/golangci-lint@v${PKG_GOLANGCI_LINT_VERSION}
|
||||
|
||||
########## Linting Options ##########
|
||||
# configuration adjustments for golangci-lint
|
||||
GOLANGCI_LINT_DISABLED_LINTERS := "" # disabling typecheck, because it currently (06.09.2019) fails with Go 1.13
|
||||
|
||||
# Rules for directory list as input for the golangci-lint program
|
||||
LINT_DIRS := $(DIRS) $(foreach dir,$(REC_DIRS),$(dir)/...)
|
||||
|
||||
#############################
|
||||
# #
|
||||
# TARGETS #
|
||||
# #
|
||||
#############################
|
||||
|
||||
.PHONY: all build build-cross clean fmt check-fmt lint check extra-clean install-tools
|
||||
|
||||
all: clean fmt check test build
|
||||
all: clean fmt check build
|
||||
|
||||
############################
|
||||
########## Builds ##########
|
||||
############################
|
||||
|
||||
# debug builds
|
||||
build-debug: GCFLAGS+="all=-N -l"
|
||||
build-debug: build
|
||||
|
||||
# default build target for the local platform
|
||||
build:
|
||||
CGO_ENABLED=0 $(GO) build $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -gcflags '$(GCFLAGS)' -o '$(BINDIR)/$(BINARIES)'
|
||||
CGO_ENABLED=0 $(GO) build $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o '$(BINDIR)/$(BINARIES)'
|
||||
|
||||
# cross-compilation for all targets
|
||||
build-cross: LDFLAGS += -extldflags "-static"
|
||||
build-cross:
|
||||
CGO_ENABLED=0 gox -parallel=3 -output="_dist/$(BINARIES)-{{.OS}}-{{.Arch}}" -osarch='$(TARGETS)' $(GOFLAGS) $(if $(TAGS),-tags '$(TAGS)',) -ldflags '$(LDFLAGS)'
|
||||
|
||||
# build a specific docker target ( '%' matches the target as specified in the Dockerfile)
|
||||
build-docker-%:
|
||||
@echo "Building Docker image k3d:$(K3D_IMAGE_TAG)-$*"
|
||||
DOCKER_BUILDKIT=1 docker build . -t k3d:$(K3D_IMAGE_TAG)-$* --target $*
|
||||
|
||||
# build helper images
|
||||
build-helper-images:
|
||||
@echo "Building docker image rancher/k3d-proxy:$(K3D_IMAGE_TAG)"
|
||||
DOCKER_BUILDKIT=1 docker build proxy/ -f proxy/Dockerfile -t rancher/k3d-proxy:$(K3D_IMAGE_TAG)
|
||||
@echo "Building docker image rancher/k3d-tools:$(K3D_IMAGE_TAG)"
|
||||
DOCKER_BUILDKIT=1 docker build --no-cache tools/ -f tools/Dockerfile -t rancher/k3d-tools:$(K3D_IMAGE_TAG) --build-arg GIT_TAG=$(GIT_TAG)
|
||||
|
||||
##############################
|
||||
########## Cleaning ##########
|
||||
##############################
|
||||
build-dockerfile: Dockerfile
|
||||
@echo "Building Docker image k3d:$(K3D_IMAGE_TAG)"
|
||||
docker build -t k3d:$(K3D_IMAGE_TAG) .
|
||||
|
||||
clean:
|
||||
@rm -rf $(BINDIR) _dist/
|
||||
@ -145,14 +81,17 @@ extra-clean: clean
|
||||
$(GO) clean -i $(PKG_GOX)
|
||||
$(GO) clean -i $(PKG_GOLANGCI_LINT)
|
||||
|
||||
##########################################
|
||||
########## Formatting & Linting ##########
|
||||
##########################################
|
||||
|
||||
# fmt will fix the golang source style in place.
|
||||
fmt:
|
||||
@gofmt -s -l -w $(GO_SRC)
|
||||
|
||||
e2e: build
|
||||
EXE='$(BINDIR)/$(BINARIES)' ./tests/runner.sh
|
||||
|
||||
e2e-dind: build-dockerfile
|
||||
@echo "Running e2e tests in k3d:$(K3D_IMAGE_TAG)"
|
||||
tests/dind.sh "${K3D_IMAGE_TAG}"
|
||||
|
||||
# check-fmt returns an error code if any source code contains format error.
|
||||
check-fmt:
|
||||
@test -z $(shell gofmt -s -l $(GO_SRC) | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'"
|
||||
@ -162,31 +101,6 @@ lint:
|
||||
|
||||
check: check-fmt lint
|
||||
|
||||
###########################
|
||||
########## Tests ##########
|
||||
###########################
|
||||
|
||||
test:
|
||||
$(GO) test $(TESTS) $(TESTFLAGS)
|
||||
|
||||
e2e: build-docker-dind
|
||||
@echo "Running e2e tests in k3d:$(K3D_IMAGE_TAG)"
|
||||
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_INCLUDE="$(E2E_INCLUDE)" E2E_EXCLUDE="$(E2E_EXCLUDE)" E2E_EXTRA="$(E2E_EXTRA)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) E2E_HELPER_IMAGE_TAG="$(E2E_HELPER_IMAGE_TAG)" tests/dind.sh "${K3D_IMAGE_TAG}-dind"
|
||||
|
||||
ci-tests: fmt check e2e
|
||||
|
||||
##########################
|
||||
########## Misc ##########
|
||||
##########################
|
||||
|
||||
drone:
|
||||
@echo "Running drone pipeline locally with branch=main and event=push"
|
||||
drone exec --trusted --branch main --event push
|
||||
|
||||
#########################################
|
||||
########## Setup & Preparation ##########
|
||||
#########################################
|
||||
|
||||
# Check for required executables
|
||||
HAS_GOX := $(shell command -v gox 2> /dev/null)
|
||||
HAS_GOLANGCI := $(shell command -v golangci-lint)
|
||||
@ -197,29 +111,31 @@ ifndef HAS_GOX
|
||||
($(GO) get $(PKG_GOX))
|
||||
endif
|
||||
ifndef HAS_GOLANGCI
|
||||
(curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b $(GOENVPATH)/bin v${PKG_GOLANGCI_LINT_VERSION})
|
||||
(curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b ${GOPATH}/bin v${PKG_GOLANGCI_LINT_VERSION})
|
||||
endif
|
||||
ifdef HAS_GOLANGCI
|
||||
ifeq ($(HAS_GOLANGCI_VERSION),)
|
||||
ifdef INTERACTIVE
|
||||
@echo "Warning: Your installed version of golangci-lint (interactive: ${INTERACTIVE}) differs from what we'd like to use. Switch to v${PKG_GOLANGCI_LINT_VERSION}? [Y/n]"
|
||||
@read line; if [ $$line == "y" ]; then (curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b $(GOENVPATH)/bin v${PKG_GOLANGCI_LINT_VERSION}); fi
|
||||
@read line; if [ $$line == "y" ]; then (curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b ${GOPATH}/bin v${PKG_GOLANGCI_LINT_VERSION}); fi
|
||||
else
|
||||
@echo "Warning: you're not using the same version of golangci-lint as us (v${PKG_GOLANGCI_LINT_VERSION})"
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# In the CI system, we need...
|
||||
# - golangci-lint for linting (lint)
|
||||
# - gox for cross-compilation (build-cross)
|
||||
# - kubectl for E2E-tests (e2e)
|
||||
ci-setup:
|
||||
@echo "Installing Go tools..."
|
||||
curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b $(GOENVPATH)/bin v$(PKG_GOLANGCI_LINT_VERSION)
|
||||
$(GO) get $(PKG_GOX)
|
||||
curl -sfL $(PKG_GOLANGCI_LINT_SCRIPT) | sh -s -- -b ${GOPATH}/bin v$(PKG_GOLANGCI_LINT_VERSION)
|
||||
go get $(PKG_GOX)
|
||||
|
||||
@echo "Installing kubectl..."
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
|
||||
chmod +x ./kubectl
|
||||
mv ./kubectl /usr/local/bin/kubectl
|
||||
sudo mv ./kubectl /usr/local/bin/kubectl
|
||||
|
||||
ci-tests: fmt check e2e
|
||||
|
||||
ci-dist: build-cross
|
||||
|
||||
ci-tests-dind: fmt check e2e-dind
|
||||
|
150
README.md
150
README.md
@ -1,76 +1,44 @@
|
||||
# [](https://k3d.io/)
|
||||
# k3d
|
||||
|
||||
[](https://drone-publish.rancher.io/rancher/k3d)
|
||||
[](./LICENSE.md)
|
||||

|
||||
[](https://travis-ci.com/rancher/k3d)
|
||||
[](https://goreportcard.com/report/github.com/rancher/k3d)
|
||||
[](./LICENSE.md)
|
||||

|
||||
[](https://github.com/rancher/k3d/releases/latest)
|
||||
[](https://formulae.brew.sh/formula/k3d)
|
||||
|
||||
[](https://pkg.go.dev/github.com/rancher/k3d/v5)
|
||||
[](./go.mod)
|
||||
[](https://goreportcard.com/report/github.com/rancher/k3d)
|
||||
|
||||
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
|
||||
[](#contributors-)
|
||||
<!-- ALL-CONTRIBUTORS-BADGE:END -->
|
||||
[](code_of_conduct.md)
|
||||
|
||||
**Please Note:** `main` is now v5.0.0 and the code for v4.x can be found in the `main-v4` branch!
|
||||
|
||||
## [k3s in docker](https://k3d.io)
|
||||
## k3s in docker
|
||||
|
||||
k3s is the lightweight Kubernetes distribution by Rancher: [rancher/k3s](https://github.com/rancher/k3s)
|
||||
|
||||
k3d creates containerized k3s clusters. This means, that you can spin up a multi-node k3s cluster on a single machine using docker.
|
||||
|
||||
[](https://asciinema.org/a/436420)
|
||||
|
||||
## Learning
|
||||
|
||||
- Website with documentation: [k3d.io](https://k3d.io/)
|
||||
- [Rancher Meetup - May 2020 - Simplifying Your Cloud-Native Development Workflow With K3s, K3c and K3d (YouTube)](https://www.youtube.com/watch?v=hMr3prm9gDM)
|
||||
- k3d demo repository: [iwilltry42/k3d-demo](https://github.com/iwilltry42/k3d-demo)
|
||||
This repository is based on [@zeerorg](https://github.com/zeerorg/)'s [zeerorg/k3s-in-docker](https://github.com/zeerorg/k3s-in-docker), reimplemented in Go by [@iwilltry42](https://github.com/iwilltry42/) in [iwilltry42/k3d](https://github.com/iwilltry42/k3d), which is now [rancher/k3d](https://github.com/rancher/k3d).
|
||||
|
||||
## Requirements
|
||||
|
||||
- [docker](https://docs.docker.com/install/)
|
||||
|
||||
## Releases
|
||||
|
||||
**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!
|
||||
**Note**: In January 2021 we upgraded from v3.x.x to **v4.0.0** which includes some breaking changes!
|
||||
**Note**: In September 2021 we upgraded from v4.4.8 to **v5.0.0** which includes some breaking changes!
|
||||
|
||||
| Platform | Stage | Version | Release Date | |
|
||||
|-----------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|---|
|
||||
| [**GitHub Releases**](https://github.com/rancher/k3d/releases) | stable | [](https://github.com/rancher/k3d/releases/latest) | [](https://github.com/rancher/k3d/releases/latest) | |
|
||||
| [**GitHub Releases**](https://github.com/rancher/k3d/releases) | latest | [](https://github.com/rancher/k3d/releases) | [](https://github.com/rancher/k3d/releases) | |
|
||||
| [**Homebrew**](https://formulae.brew.sh/formula/k3d) | - | [](https://formulae.brew.sh/formula/k3d) | - | |
|
||||
| [**Chocolatey**](https://chocolatey.org/packages/k3d/)| stable | [](https://chocolatey.org/packages/k3d/) | - | |
|
||||
|
||||
## Get
|
||||
|
||||
You have several options there:
|
||||
|
||||
- use the install script to grab the latest release:
|
||||
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/master/install.sh | bash`
|
||||
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/master/install.sh | bash`
|
||||
- use the install script to grab a specific release (via `TAG` environment variable):
|
||||
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
|
||||
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
|
||||
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/master/install.sh | TAG=v1.3.4 bash`
|
||||
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/master/install.sh | TAG=v1.3.4 bash`
|
||||
|
||||
- use [Homebrew](https://brew.sh): `brew install k3d` (Homebrew is available for MacOS and Linux)
|
||||
- Use [Homebrew](https://brew.sh): `brew install k3d` (Homebrew is avaiable for MacOS and Linux)
|
||||
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
|
||||
- install via [MacPorts](https://www.macports.org): `sudo port selfupdate && sudo port install k3d` (MacPorts is available for MacOS)
|
||||
- install via [AUR](https://aur.archlinux.org/) package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/): `yay -S rancher-k3d-bin`
|
||||
- grab a release from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself.
|
||||
- install via go: `go install github.com/rancher/k3d@latest` (**Note**: this will give you unreleased/bleeding-edge changes)
|
||||
- use [Chocolatey](https://chocolatey.org/): `choco install k3d` (Chocolatey package manager is available for Windows)
|
||||
- package source can be found in [erwinkersten/chocolatey-packages](https://github.com/erwinkersten/chocolatey-packages/tree/master/automatic/k3d)
|
||||
- Install via [AUR](https://aur.archlinux.org/) package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/): `yay -S rancher-k3d-bin`
|
||||
- Grab a release from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself.
|
||||
- Via go: `go install github.com/rancher/k3d` (**Note**: this will give you unreleased/bleeding-edge changes)
|
||||
|
||||
or...
|
||||
|
||||
## Build
|
||||
|
||||
1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v5@main`
|
||||
1. Clone this repo, e.g. via `go get -u github.com/rancher/k3d`
|
||||
2. Inside the repo run
|
||||
- 'make install-tools' to make sure required go packages are installed
|
||||
3. Inside the repo run one of the following commands
|
||||
@ -80,74 +48,32 @@ or...
|
||||
|
||||
## Usage
|
||||
|
||||
Check out what you can do via `k3d help` or check the docs @ [k3d.io](https://k3d.io)
|
||||
Check out what you can do via `k3d help`
|
||||
|
||||
Example Workflow: Create a new cluster and use it with `kubectl`
|
||||
(*Note:* `kubectl` is not part of `k3d`, so you have to [install it first if needed](https://kubernetes.io/docs/tasks/tools/install-kubectl/))
|
||||
|
||||
1. `k3d cluster create CLUSTER_NAME` to create a new single-node cluster (= 1 container running k3s + 1 loadbalancer container)
|
||||
2. [Optional, included in cluster create] `k3d kubeconfig merge CLUSTER_NAME --kubeconfig-switch-context` to update your default kubeconfig and switch the current-context to the new one
|
||||
1. `k3d create` to create a new single-node cluster (docker container)
|
||||
2. `export KUBECONFIG=$(k3d get-kubeconfig)` to make `kubectl` to use the kubeconfig for that cluster
|
||||
3. execute some commands like `kubectl get pods --all-namespaces`
|
||||
4. `k3d cluster delete CLUSTER_NAME` to delete the default cluster
|
||||
4. `k3d delete` to delete the default cluster
|
||||
|
||||
## Connect
|
||||
### Exposing Services
|
||||
|
||||
If you want to access your services from the outside (e.g. via Ingress), you need to map the ports (e.g. port 80 for Ingress) using the `--publish` flag (or aliases).
|
||||
Check out the [examples here](docs/examples.md).
|
||||
|
||||
## What now?
|
||||
|
||||
Find more details under the following Links:
|
||||
|
||||
- [Further documentation](docs/documentation.md)
|
||||
- [Using registries](docs/registries.md)
|
||||
- [Usage examples](docs/examples.md)
|
||||
- [Frequently asked questions and nice-to-know facts](docs/faq.md)
|
||||
|
||||
### Connect
|
||||
|
||||
1. Join the Rancher community on slack via [slack.rancher.io](https://slack.rancher.io/)
|
||||
2. Go to [rancher-users.slack.com](https://rancher-users.slack.com) and join our channel #k3d
|
||||
3. Start chatting
|
||||
|
||||
## History
|
||||
|
||||
This repository is based on [@zeerorg](https://github.com/zeerorg/)'s [zeerorg/k3s-in-docker](https://github.com/zeerorg/k3s-in-docker), reimplemented in Go by [@iwilltry42](https://github.com/iwilltry42/) in [iwilltry42/k3d](https://github.com/iwilltry42/k3d), which got adopted by Rancher in[rancher/k3d](https://github.com/rancher/k3d).
|
||||
|
||||
## Related Projects
|
||||
|
||||
- [k3x](https://github.com/inercia/k3x): GUI (Linux) to k3d
|
||||
- [vscode-k3d](https://github.com/inercia/vscode-k3d): vscode plugin for k3d
|
||||
- [AbsaOSS/k3d-action](https://github.com/AbsaOSS/k3d-action): fully customizable GitHub Action to run lightweight Kubernetes clusters.
|
||||
- [AutoK3s](https://github.com/cnrancher/autok3s): a lightweight tool to help run K3s everywhere including k3d provider.
|
||||
- [nolar/setup-k3d-k3s](https://github.com/nolar/setup-k3d-k3s): setup K3d/K3s for GitHub Actions.
|
||||
|
||||
## Contributing
|
||||
|
||||
k3d is a community-driven project and so we welcome contributions of any form, be it code, logic, documentation, examples, requests, bug reports, ideas or anything else that pushes this project forward.
|
||||
|
||||
Please read our [**Contributing Guidelines**](./CONTRIBUTING.md) and the related [**Code of Conduct**](./CODE_OF_CONDUCT.md).
|
||||
|
||||
You can find an overview of the k3d project (e.g. explanations and a repository guide) in the documentation: [k3d.io/internals/project](https://k3d.io/internals/project)
|
||||
|
||||
[](code_of_conduct.md)
|
||||
|
||||
## Contributors ✨
|
||||
|
||||
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
|
||||
|
||||
<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
|
||||
<!-- prettier-ignore-start -->
|
||||
<!-- markdownlint-disable -->
|
||||
<table>
|
||||
<tr>
|
||||
<td align="center"><a href="https://twitter.com/iwilltry42"><img src="https://avatars3.githubusercontent.com/u/25345277?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Thorsten Klein</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=iwilltry42" title="Code">💻</a> <a href="https://github.com/rancher/k3d/commits?author=iwilltry42" title="Documentation">📖</a> <a href="#ideas-iwilltry42" title="Ideas, Planning, & Feedback">🤔</a> <a href="#maintenance-iwilltry42" title="Maintenance">🚧</a></td>
|
||||
<td align="center"><a href="https://blog.zeerorg.site/"><img src="https://avatars0.githubusercontent.com/u/13547997?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Rishabh Gupta</b></sub></a><br /><a href="#ideas-zeerorg" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/rancher/k3d/commits?author=zeerorg" title="Code">💻</a></td>
|
||||
<td align="center"><a href="http://www.zenika.com"><img src="https://avatars3.githubusercontent.com/u/25585516?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Louis Tournayre</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=louiznk" title="Documentation">📖</a></td>
|
||||
<td align="center"><a href="https://github.com/lionelnicolas"><img src="https://avatars3.githubusercontent.com/u/6538664?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Lionel Nicolas</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=lionelnicolas" title="Code">💻</a></td>
|
||||
<td align="center"><a href="https://github.com/toonsevrin.keys"><img src="https://avatars1.githubusercontent.com/u/5507199?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Toon Sevrin</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=toonsevrin" title="Code">💻</a></td>
|
||||
<td align="center"><a href="http://debian-solutions.de"><img src="https://avatars3.githubusercontent.com/u/1111056?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Dennis Hoppe</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=dhoppe" title="Documentation">📖</a> <a href="#example-dhoppe" title="Examples">💡</a></td>
|
||||
<td align="center"><a href="https://dellinger.dev"><img src="https://avatars0.githubusercontent.com/u/3109892?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Jonas Dellinger</b></sub></a><br /><a href="#infra-JohnnyCrazy" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center"><a href="https://github.com/markrexwinkel"><img src="https://avatars2.githubusercontent.com/u/10704814?v=4?s=100" width="100px;" alt=""/><br /><sub><b>markrexwinkel</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=markrexwinkel" title="Documentation">📖</a></td>
|
||||
<td align="center"><a href="http://inerciatech.com/"><img src="https://avatars2.githubusercontent.com/u/1841612?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Alvaro</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=inercia" title="Code">💻</a> <a href="#ideas-inercia" title="Ideas, Planning, & Feedback">🤔</a> <a href="#plugin-inercia" title="Plugin/utility libraries">🔌</a></td>
|
||||
<td align="center"><a href="http://wsl.dev"><img src="https://avatars2.githubusercontent.com/u/905874?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Nuno do Carmo</b></sub></a><br /><a href="#content-nunix" title="Content">🖋</a> <a href="#tutorial-nunix" title="Tutorials">✅</a> <a href="#question-nunix" title="Answering Questions">💬</a></td>
|
||||
<td align="center"><a href="https://github.com/erwinkersten"><img src="https://avatars0.githubusercontent.com/u/4391121?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Erwin Kersten</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=erwinkersten" title="Documentation">📖</a></td>
|
||||
<td align="center"><a href="http://www.alexsears.com"><img src="https://avatars.githubusercontent.com/u/3712883?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Alex Sears</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=searsaw" title="Documentation">📖</a></td>
|
||||
<td align="center"><a href="http://shanduur.github.io"><img src="https://avatars.githubusercontent.com/u/32583062?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Mateusz Urbanek</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=Shanduur" title="Code">💻</a></td>
|
||||
<td align="center"><a href="https://github.com/benjaminjb"><img src="https://avatars.githubusercontent.com/u/4651855?v=4?s=100" width="100px;" alt=""/><br /><sub><b>Benjamin Blattberg</b></sub></a><br /><a href="https://github.com/rancher/k3d/commits?author=benjaminjb" title="Code">💻</a></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
<!-- prettier-ignore-end -->
|
||||
|
||||
<!-- ALL-CONTRIBUTORS-LIST:END -->
|
||||
|
||||
This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
|
||||
|
334
cli/cluster.go
Normal file
334
cli/cluster.go
Normal file
@ -0,0 +1,334 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
	// defaultContainerNamePrefix is prepended to every container name k3d
	// creates; see GetContainerName ("k3d-<cluster>-<role>[-<index>]").
	defaultContainerNamePrefix = "k3d"
)
|
||||
|
||||
// GetContainerName generates the container names
|
||||
func GetContainerName(role, clusterName string, postfix int) string {
|
||||
if postfix >= 0 {
|
||||
return fmt.Sprintf("%s-%s-%s-%d", defaultContainerNamePrefix, clusterName, role, postfix)
|
||||
}
|
||||
return fmt.Sprintf("%s-%s-%s", defaultContainerNamePrefix, clusterName, role)
|
||||
}
|
||||
|
||||
// GetAllContainerNames returns a list of all containernames that will be created
|
||||
func GetAllContainerNames(clusterName string, serverCount, workerCount int) []string {
|
||||
names := []string{}
|
||||
for postfix := 0; postfix < serverCount; postfix++ {
|
||||
names = append(names, GetContainerName("server", clusterName, postfix))
|
||||
}
|
||||
for postfix := 0; postfix < workerCount; postfix++ {
|
||||
names = append(names, GetContainerName("worker", clusterName, postfix))
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// createDirIfNotExists checks for the existence of a directory and creates it along with all required parents if not.
|
||||
// It returns an error if the directory (or parents) couldn't be created and nil if it worked fine or if the path already exists.
|
||||
func createDirIfNotExists(path string) error {
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
return os.MkdirAll(path, os.ModePerm)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createClusterDir creates a directory with the cluster name under $HOME/.config/k3d/<cluster_name>.
|
||||
// The cluster directory will be used e.g. to store the kubeconfig file.
|
||||
func createClusterDir(name string) {
|
||||
clusterPath, _ := getClusterDir(name)
|
||||
if err := createDirIfNotExists(clusterPath); err != nil {
|
||||
log.Fatalf("Couldn't create cluster directory [%s] -> %+v", clusterPath, err)
|
||||
}
|
||||
// create subdir for sharing container images
|
||||
if err := createDirIfNotExists(clusterPath + "/images"); err != nil {
|
||||
log.Fatalf("Couldn't create cluster sub-directory [%s] -> %+v", clusterPath+"/images", err)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteClusterDir contrary to createClusterDir, this deletes the cluster directory under $HOME/.config/k3d/<cluster_name>
|
||||
func deleteClusterDir(name string) {
|
||||
clusterPath, _ := getClusterDir(name)
|
||||
if err := os.RemoveAll(clusterPath); err != nil {
|
||||
log.Warningf("Couldn't delete cluster directory [%s]. You might want to delete it manually.", clusterPath)
|
||||
}
|
||||
}
|
||||
|
||||
// getClusterDir returns the path to the cluster directory which is $HOME/.config/k3d/<cluster_name>
|
||||
func getClusterDir(name string) (string, error) {
|
||||
homeDir, err := homedir.Dir()
|
||||
if err != nil {
|
||||
log.Error("Couldn't get user's home directory")
|
||||
return "", err
|
||||
}
|
||||
return path.Join(homeDir, ".config", "k3d", name), nil
|
||||
}
|
||||
|
||||
func getClusterKubeConfigPath(cluster string) (string, error) {
|
||||
clusterDir, err := getClusterDir(cluster)
|
||||
return path.Join(clusterDir, "kubeconfig.yaml"), err
|
||||
}
|
||||
|
||||
// createKubeConfigFile copies the kubeconfig out of the cluster's server
// container and writes a fixed-up copy to the path returned by
// getClusterKubeConfigPath.
//
// Steps: locate the server container by its k3d labels, copy
// /output/kubeconfig.yaml out of it, strip the tar framing, rewrite host
// name and cluster name, and write the result to disk.
func createKubeConfigFile(cluster string) error {
	ctx := context.Background()
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return err
	}

	// find the single server container of this cluster via its k3d labels
	filters := filters.NewArgs()
	filters.Add("label", "app=k3d")
	filters.Add("label", fmt.Sprintf("cluster=%s", cluster))
	filters.Add("label", "component=server")
	server, err := docker.ContainerList(ctx, types.ContainerListOptions{
		Filters: filters,
	})

	if err != nil {
		return fmt.Errorf("Failed to get server container for cluster %s\n%+v", cluster, err)
	}

	if len(server) == 0 {
		return fmt.Errorf("No server container for cluster %s", cluster)
	}

	// get kubeconfig file from container and read contents

	// common warning emitted for any failure while extracting the kubeconfig;
	// the usual cause is that k3s hasn't written the file yet
	kubeconfigerror := func() {
		log.Warnf("Couldn't get the kubeconfig from cluster '%s': Maybe it's not ready yet and you can try again later.", cluster)
	}

	// CopyFromContainer returns the file wrapped in a tar stream
	reader, _, err := docker.CopyFromContainer(ctx, server[0].ID, "/output/kubeconfig.yaml")
	if err != nil {
		kubeconfigerror()
		return fmt.Errorf(" Couldn't copy kubeconfig.yaml from server container %s\n%+v", server[0].ID, err)
	}
	defer reader.Close()

	readBytes, err := ioutil.ReadAll(reader)
	if err != nil {
		kubeconfigerror()
		return fmt.Errorf(" Couldn't read kubeconfig from container\n%+v", err)
	}

	// create destination kubeconfig file
	destPath, err := getClusterKubeConfigPath(cluster)
	if err != nil {
		return err
	}

	kubeconfigfile, err := os.Create(destPath)
	if err != nil {
		return fmt.Errorf(" Couldn't create kubeconfig file %s\n%+v", destPath, err)
	}
	defer kubeconfigfile.Close()

	// write to file, skipping the first 512 bytes which contain file metadata
	// (the tar header block) and trimming any NULL characters (tar padding).
	// NOTE(review): this panics if fewer than 512 bytes were read, and assumes
	// the payload fits in a single tar entry — confirm before restructuring.
	trimBytes := bytes.Trim(readBytes[512:], "\x00")

	// Fix up kubeconfig.yaml file.
	//
	// K3s generates the default kubeconfig.yaml with host name as 'localhost'.
	// Change the host name to the name user specified via the --api-port argument.
	//
	// When user did not specify the host name and when we are running against a remote docker,
	// set the host name to remote docker machine's IP address.
	//
	// Otherwise, the hostname remains as 'localhost'
	//
	// Additionally, we replace every occurrence of 'default' in the kubeconfig
	// with the actual cluster name. NOTE(review): this is a blanket text
	// replacement — any other occurrence of the literal string "default" in
	// the file would be rewritten too.
	apiHost := server[0].Labels["apihost"]

	s := string(trimBytes)
	s = strings.ReplaceAll(s, "default", cluster)
	if apiHost != "" {
		// only the first occurrence is replaced in each case
		s = strings.Replace(s, "localhost", apiHost, 1)
		s = strings.Replace(s, "127.0.0.1", apiHost, 1)
	}
	trimBytes = []byte(s)

	_, err = kubeconfigfile.Write(trimBytes)
	if err != nil {
		return fmt.Errorf("Couldn't write to kubeconfig.yaml\n%+v", err)
	}

	return nil
}
|
||||
|
||||
func getKubeConfig(cluster string, overwrite bool) (string, error) {
|
||||
kubeConfigPath, err := getClusterKubeConfigPath(cluster)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if clusters, err := getClusters(false, cluster); err != nil || len(clusters) != 1 {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "", fmt.Errorf("Cluster %s does not exist", cluster)
|
||||
}
|
||||
|
||||
// Create or overwrite file no matter if it exists or not
|
||||
if overwrite {
|
||||
log.Debugf("Creating/Overwriting file %s...", kubeConfigPath)
|
||||
if err = createKubeConfigFile(cluster); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
// If kubeconfi.yaml has not been created, generate it now
|
||||
if _, err := os.Stat(kubeConfigPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
log.Debugf("File %s does not exist. Creating it now...", kubeConfigPath)
|
||||
if err = createKubeConfigFile(cluster); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
log.Debugf("File %s exists, leaving it as it is...", kubeConfigPath)
|
||||
}
|
||||
}
|
||||
|
||||
return kubeConfigPath, nil
|
||||
}
|
||||
|
||||
// printClusters prints the names of existing clusters
|
||||
func printClusters() error {
|
||||
clusters, err := getClusters(true, "")
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't list clusters\n%+v", err)
|
||||
}
|
||||
if len(clusters) == 0 {
|
||||
return fmt.Errorf("No clusters found")
|
||||
}
|
||||
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetAlignment(tablewriter.ALIGN_CENTER)
|
||||
table.SetHeader([]string{"NAME", "IMAGE", "STATUS", "WORKERS"})
|
||||
|
||||
for _, cluster := range clusters {
|
||||
workersRunning := 0
|
||||
for _, worker := range cluster.workers {
|
||||
if worker.State == "running" {
|
||||
workersRunning++
|
||||
}
|
||||
}
|
||||
workerData := fmt.Sprintf("%d/%d", workersRunning, len(cluster.workers))
|
||||
clusterData := []string{cluster.name, cluster.image, cluster.status, workerData}
|
||||
table.Append(clusterData)
|
||||
}
|
||||
|
||||
table.Render()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Classify cluster state: Running, Stopped or Abnormal
|
||||
func getClusterStatus(server types.Container, workers []types.Container) string {
|
||||
// The cluster is in the abnromal state when server state and the worker
|
||||
// states don't agree.
|
||||
for _, w := range workers {
|
||||
if w.State != server.State {
|
||||
return "unhealthy"
|
||||
}
|
||||
}
|
||||
|
||||
switch server.State {
|
||||
case "exited": // All containers in this state are most likely
|
||||
// as the result of running the "k3d stop" command.
|
||||
return "stopped"
|
||||
}
|
||||
|
||||
return server.State
|
||||
}
|
||||
|
||||
// getClusters uses the docker API to get existing clusters and compares that with the list of cluster directories
// When 'all' is true, 'cluster' contains all clusters found from the docker daemon
// When 'all' is false, 'cluster' contains up to one cluster whose name matches 'name'. 'cluster' can
// be empty if no matching cluster is found.
func getClusters(all bool, name string) (map[string]Cluster, error) {
	ctx := context.Background()
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return nil, fmt.Errorf(" Couldn't create docker client\n%+v", err)
	}

	// Prepare docker label filters
	filters := filters.NewArgs()
	filters.Add("label", "app=k3d")
	filters.Add("label", "component=server")

	// get all servers created by k3d (All:true includes stopped containers)
	k3dServers, err := docker.ContainerList(ctx, types.ContainerListOptions{
		All:     true,
		Filters: filters,
	})
	if err != nil {
		return nil, fmt.Errorf("WARNING: couldn't list server containers\n%+v", err)
	}

	clusters := make(map[string]Cluster)

	// don't filter for servers but for workers now.
	// NOTE: the same filters.Args value is mutated and reused for every
	// iteration below, so the Add/Del pairs must stay balanced.
	filters.Del("label", "component=server")
	filters.Add("label", "component=worker")

	// for all servers created by k3d, get workers and cluster information
	for _, server := range k3dServers {
		clusterName := server.Labels["cluster"]

		// Skip the cluster if we don't want all of them, and
		// the cluster name does not match.
		if all || name == clusterName {

			// Add the cluster
			filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))

			// get workers
			workers, err := docker.ContainerList(ctx, types.ContainerListOptions{
				All:     true,
				Filters: filters,
			})
			if err != nil {
				// best effort: a failed worker lookup degrades the cluster
				// info but does not abort the listing
				log.Warningf("Couldn't get worker containers for cluster %s\n%+v", clusterName, err)
			}

			// save cluster information
			serverPorts := []string{}
			for _, port := range server.Ports {
				serverPorts = append(serverPorts, strconv.Itoa(int(port.PublicPort)))
			}
			clusters[clusterName] = Cluster{
				name:        clusterName,
				image:       server.Image,
				status:      getClusterStatus(server, workers),
				serverPorts: serverPorts,
				server:      server,
				workers:     workers,
			}
			// clear label filters before searching for next cluster
			filters.Del("label", fmt.Sprintf("cluster=%s", clusterName))
		}
	}

	return clusters, nil
}
|
840
cli/commands.go
Normal file
840
cli/commands.go
Normal file
@ -0,0 +1,840 @@
|
||||
package run
|
||||
|
||||
/*
|
||||
* This file contains the "backend" functionality for the CLI commands (and flags)
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// CheckTools checks if the docker API server is responding
|
||||
func CheckTools(c *cli.Context) error {
|
||||
log.Print("Checking docker...")
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ping, err := docker.Ping(ctx)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Checking docker failed\n%+v", err)
|
||||
}
|
||||
log.Printf("SUCCESS: Checking docker succeeded (API: v%s)\n", ping.APIVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateCluster creates a new single-node cluster container and initializes
// the cluster directory.
//
// It reads its configuration from the CLI context (cluster name, image,
// env/labels/ports/volumes, API port, registry options, ...), then creates —
// in order — the cluster network, the cluster directory, an optional local
// registry, the server container, and the worker containers. Any error after
// resource creation has begun triggers a rollback via DeleteCluster.
func CreateCluster(c *cli.Context) error {

	// On Error delete the cluster. If there createCluster() encounter any error,
	// call this function to remove all resources allocated for the cluster so far
	// so that they don't linger around.
	deleteCluster := func() {
		log.Println("ERROR: Cluster creation failed, rolling back...")
		if err := DeleteCluster(c); err != nil {
			log.Printf("Error: Failed to delete cluster %s", c.String("name"))
		}
	}

	// validate --wait flag
	if c.IsSet("wait") && c.Int("wait") < 0 {
		log.Fatalf("Negative value for '--wait' not allowed (set '%d')", c.Int("wait"))
	}

	/**********************
	 *                    *
	 *   CONFIGURATION    *
	 * vvvvvvvvvvvvvvvvvv *
	 **********************/

	/*
	 * --name, -n
	 * Name of the cluster
	 */

	// ensure that it's a valid hostname, because it will be part of container names
	if err := CheckClusterName(c.String("name")); err != nil {
		return err
	}

	// check if the cluster name is already taken
	if cluster, err := getClusters(false, c.String("name")); err != nil {
		return err
	} else if len(cluster) != 0 {
		// A cluster exists with the same name. Return with an error.
		return fmt.Errorf(" Cluster %s already exists", c.String("name"))
	}

	/*
	 * --image, -i
	 * The k3s image used for the k3d node containers
	 */
	// define image
	image := c.String("image")
	// if no registry was provided, use the default docker.io
	if len(strings.Split(image, "/")) <= 2 {
		image = fmt.Sprintf("%s/%s", DefaultRegistry, image)
	}

	/*
	 * Cluster network
	 * For proper communication, all k3d node containers have to be in the same docker network
	 */
	// create cluster network
	networkID, err := createClusterNetwork(c.String("name"))
	if err != nil {
		return err
	}
	log.Printf("Created cluster network with ID %s", networkID)

	/*
	 * --env, -e
	 * Environment variables that will be passed into the k3d node containers
	 */
	// environment variables; a random cluster secret is always appended
	env := []string{"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml"}
	env = append(env, c.StringSlice("env")...)
	env = append(env, fmt.Sprintf("K3S_CLUSTER_SECRET=%s", GenerateRandomString(20)))

	/*
	 * --label, -l
	 * Docker container labels that will be added to the k3d node containers
	 */
	// labels
	labelmap, err := mapNodesToLabelSpecs(c.StringSlice("label"), GetAllContainerNames(c.String("name"), DefaultServerCount, c.Int("workers")))
	if err != nil {
		log.Fatal(err)
	}

	/*
	 * Arguments passed on to the k3s server and agent, will be filled later
	 */
	k3AgentArgs := []string{}
	k3sServerArgs := []string{}

	/*
	 * --api-port, -a
	 * The port that will be used by the k3s API-Server
	 * It will be mapped to localhost or to another host interface, if specified
	 * If another host is chosen, we also add a tls-san argument for the server to allow connections
	 */
	apiPort, err := parseAPIPort(c.String("api-port"))
	if err != nil {
		return err
	}
	k3sServerArgs = append(k3sServerArgs, "--https-listen-port", apiPort.Port)

	// When the 'host' is not provided by --api-port, try to fill it using Docker Machine's IP address.
	if apiPort.Host == "" {
		apiPort.Host, err = getDockerMachineIp()
		// IP address is the same as the host
		apiPort.HostIP = apiPort.Host
		// In case of error, log a warning message and continue, since it's most
		// likely caused by a misconfigured DOCKER_MACHINE_NAME environment variable.
		if err != nil {
			log.Warning("Failed to get docker machine IP address, ignoring the DOCKER_MACHINE_NAME environment variable setting.")
		}
	}

	// Add TLS SAN for non default host name
	if apiPort.Host != "" {
		log.Printf("Add TLS SAN for %s", apiPort.Host)
		k3sServerArgs = append(k3sServerArgs, "--tls-san", apiPort.Host)
	}

	/*
	 * --server-arg, -x
	 * Add user-supplied arguments for the k3s server
	 */
	if c.IsSet("server-arg") || c.IsSet("x") {
		k3sServerArgs = append(k3sServerArgs, c.StringSlice("server-arg")...)
	}

	/*
	 * --agent-arg
	 * Add user-supplied arguments for the k3s agent
	 */
	if c.IsSet("agent-arg") {
		if c.Int("workers") < 1 {
			log.Warnln("--agent-arg supplied, but --workers is 0, so no agents will be created")
		}
		k3AgentArgs = append(k3AgentArgs, c.StringSlice("agent-arg")...)
	}

	/*
	 * --port, -p, --publish, --add-port
	 * List of ports, that should be mapped from some or all k3d node containers to the host system (or other interface)
	 */
	// new port map
	portmap, err := mapNodesToPortSpecs(c.StringSlice("port"), GetAllContainerNames(c.String("name"), DefaultServerCount, c.Int("workers")))
	if err != nil {
		log.Fatal(err)
	}

	/*
	 * Image Volume
	 * A docker volume that will be shared by every k3d node container in the cluster.
	 * This volume will be used for the `import-image` command.
	 * On it, all node containers can access the image tarball.
	 */
	// create a docker volume for sharing image tarballs with the cluster
	imageVolume, err := createImageVolume(c.String("name"))
	log.Println("Created docker volume ", imageVolume.Name)
	if err != nil {
		return err
	}

	/*
	 * --volume, -v
	 * List of volumes: host directory mounts for some or all k3d node containers in the cluster
	 */
	volumes := c.StringSlice("volume")

	volumesSpec, err := NewVolumes(volumes)
	if err != nil {
		return err
	}

	// the image volume is always mounted at /images in every node container
	volumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, fmt.Sprintf("%s:/images", imageVolume.Name))

	/*
	 * --registries-file
	 * check if there is a registries file; fall back to the global default
	 * location, and skip loading entirely if neither exists
	 */
	registriesFile := ""
	if c.IsSet("registries-file") {
		registriesFile = c.String("registries-file")
		if !fileExists(registriesFile) {
			log.Fatalf("registries-file %q does not exists", registriesFile)
		}
	} else {
		registriesFile, err = getGlobalRegistriesConfFilename()
		if err != nil {
			log.Fatal(err)
		}
		if !fileExists(registriesFile) {
			// if the default registries file does not exists, go ahead but do not try to load it
			registriesFile = ""
		}
	}

	/*
	 * clusterSpec
	 * Defines, with which specifications, the cluster and the nodes inside should be created
	 */
	clusterSpec := &ClusterSpec{
		AgentArgs:            k3AgentArgs,
		APIPort:              *apiPort,
		AutoRestart:          c.Bool("auto-restart"),
		ClusterName:          c.String("name"),
		Env:                  env,
		NodeToLabelSpecMap:   labelmap,
		Image:                image,
		NodeToPortSpecMap:    portmap,
		PortAutoOffset:       c.Int("port-auto-offset"),
		RegistriesFile:       registriesFile,
		RegistryEnabled:      c.Bool("enable-registry"),
		RegistryCacheEnabled: c.Bool("enable-registry-cache"),
		RegistryName:         c.String("registry-name"),
		RegistryPort:         c.Int("registry-port"),
		RegistryVolume:       c.String("registry-volume"),
		ServerArgs:           k3sServerArgs,
		Volumes:              volumesSpec,
	}

	/******************
	 *                *
	 *    CREATION    *
	 * vvvvvvvvvvvvvv *
	 ******************/

	log.Printf("Creating cluster [%s]", c.String("name"))

	/*
	 * Cluster Directory
	 */
	// create the directory where we will put the kubeconfig file by default (when running `k3d get-config`)
	createClusterDir(c.String("name"))

	/* (1)
	 * Registry (optional)
	 * Create the (optional) registry container
	 */
	var registryNameExists *dnsNameCheck
	if clusterSpec.RegistryEnabled {
		// kick off the async DNS check now; its result is consumed at the end
		registryNameExists = newAsyncNameExists(clusterSpec.RegistryName, 1*time.Second)
		if _, err = createRegistry(*clusterSpec); err != nil {
			deleteCluster()
			return err
		}
	}

	/* (2)
	 * Server
	 * Create the server node container
	 */
	serverContainerID, err := createServer(clusterSpec)
	if err != nil {
		deleteCluster()
		return err
	}

	/* (2.1)
	 * Wait
	 * Wait for k3s server to be done initializing, if wanted
	 */
	// We're simply scanning the container logs for a line that tells us that everything's up and running
	// TODO: also wait for worker nodes
	if c.IsSet("wait") {
		if err := waitForContainerLogMessage(serverContainerID, "Wrote kubeconfig", c.Int("wait")); err != nil {
			deleteCluster()
			return fmt.Errorf("ERROR: failed while waiting for server to come up\n%+v", err)
		}
	}

	/* (3)
	 * Workers
	 * Create the worker node containers
	 */
	// TODO: do this concurrently in different goroutines
	if c.Int("workers") > 0 {
		log.Printf("Booting %s workers for cluster %s", strconv.Itoa(c.Int("workers")), c.String("name"))
		for i := 0; i < c.Int("workers"); i++ {
			workerID, err := createWorker(clusterSpec, i)
			if err != nil {
				deleteCluster()
				return err
			}
			log.Printf("Created worker with ID %s\n", workerID)
		}
	}

	/* (4)
	 * Done
	 * Finished creating resources.
	 */
	log.Printf("SUCCESS: created cluster [%s]", c.String("name"))

	if clusterSpec.RegistryEnabled {
		log.Printf("A local registry has been started as %s:%d", clusterSpec.RegistryName, clusterSpec.RegistryPort)

		exists, err := registryNameExists.Exists()
		if !exists || err != nil {
			log.Printf("Make sure %s resolves to '127.0.0.1' (using /etc/hosts f.e)", clusterSpec.RegistryName)
		}
	}

	log.Printf(`You can now use the cluster with:

export KUBECONFIG="$(%s get-kubeconfig --name='%s')"
kubectl cluster-info`, os.Args[0], c.String("name"))

	return nil
}
|
||||
|
||||
// DeleteCluster removes the containers belonging to a cluster and its local
// directory. With --all it removes every cluster; otherwise it removes the
// cluster selected by --name. For each cluster it removes the workers
// (best-effort), the cluster directory, the server container, the registry
// network attachment, the cluster network and the image volume.
func DeleteCluster(c *cli.Context) error {

	clusters, err := getClusters(c.Bool("all"), c.String("name"))

	if err != nil {
		return err
	}

	if len(clusters) == 0 {
		if !c.IsSet("all") && c.IsSet("name") {
			return fmt.Errorf("No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to delete other clusters)", c.String("name"))
		}
		return fmt.Errorf("No cluster(s) found")
	}

	// remove clusters one by one instead of appending all names to the docker command
	// this allows for more granular error handling and logging
	for _, cluster := range clusters {
		log.Printf("Removing cluster [%s]", cluster.name)
		if len(cluster.workers) > 0 {
			// TODO: this could be done in goroutines
			log.Printf("...Removing %d workers\n", len(cluster.workers))
			for _, worker := range cluster.workers {
				// a worker that fails to be removed is logged but skipped
				if err := removeContainer(worker.ID); err != nil {
					log.Println(err)
					continue
				}
			}
		}
		deleteClusterDir(cluster.name)
		log.Println("...Removing server")
		// NOTE(review): failing to remove a server aborts the whole command,
		// leaving the network/volume of this cluster — and any remaining
		// clusters — untouched.
		if err := removeContainer(cluster.server.ID); err != nil {
			return fmt.Errorf(" Couldn't remove server for cluster %s\n%+v", cluster.name, err)
		}

		if err := disconnectRegistryFromNetwork(cluster.name, c.IsSet("keep-registry-volume")); err != nil {
			log.Warningf("Couldn't disconnect Registry from network %s\n%+v", cluster.name, err)
		}

		if c.IsSet("prune") {
			// disconnect any other container that is connected to the k3d network
			nid, err := getClusterNetwork(cluster.name)
			if err != nil {
				log.Warningf("Couldn't get the network for cluster %q\n%+v", cluster.name, err)
			}
			cids, err := getContainersInNetwork(nid)
			if err != nil {
				log.Warningf("Couldn't get the list of containers connected to network %q\n%+v", nid, err)
			}
			for _, cid := range cids {
				err := disconnectContainerFromNetwork(cid, nid)
				if err != nil {
					log.Warningf("Couldn't disconnect container %q from network %q", cid, nid)
					continue
				}
				log.Printf("...%q has been forced to disconnect from %q's network", cid, cluster.name)
			}
		}

		// network and volume removal are best-effort: failures only warn
		if err := deleteClusterNetwork(cluster.name); err != nil {
			log.Warningf("Couldn't delete cluster network for cluster %s\n%+v", cluster.name, err)
		}

		log.Println("...Removing docker image volume")
		if err := deleteImageVolume(cluster.name); err != nil {
			log.Warningf("Couldn't delete image docker volume for cluster %s\n%+v", cluster.name, err)
		}

		log.Infof("Removed cluster [%s]", cluster.name)
	}

	return nil
}
|
||||
|
||||
// StopCluster stops a running cluster container (restartable)
|
||||
func StopCluster(c *cli.Context) error {
|
||||
clusters, err := getClusters(c.Bool("all"), c.String("name"))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(clusters) == 0 {
|
||||
if !c.IsSet("all") && c.IsSet("name") {
|
||||
return fmt.Errorf("No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to stop other clusters)", c.String("name"))
|
||||
}
|
||||
return fmt.Errorf("No cluster(s) found")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
// remove clusters one by one instead of appending all names to the docker command
|
||||
// this allows for more granular error handling and logging
|
||||
for _, cluster := range clusters {
|
||||
log.Printf("Stopping cluster [%s]", cluster.name)
|
||||
if len(cluster.workers) > 0 {
|
||||
log.Printf("...Stopping %d workers\n", len(cluster.workers))
|
||||
for _, worker := range cluster.workers {
|
||||
if err := docker.ContainerStop(ctx, worker.ID, nil); err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Println("...Stopping server")
|
||||
if err := docker.ContainerStop(ctx, cluster.server.ID, nil); err != nil {
|
||||
return fmt.Errorf(" Couldn't stop server for cluster %s\n%+v", cluster.name, err)
|
||||
}
|
||||
|
||||
log.Infof("Stopped cluster [%s]", cluster.name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartCluster starts the stopped containers of one or all clusters: the
// (optional) registry container, then the server, then the workers.
func StartCluster(c *cli.Context) error {
	clusters, err := getClusters(c.Bool("all"), c.String("name"))

	if err != nil {
		return err
	}

	if len(clusters) == 0 {
		if !c.IsSet("all") && c.IsSet("name") {
			return fmt.Errorf("No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to start other clusters)", c.String("name"))
		}
		return fmt.Errorf("No cluster(s) found")
	}

	ctx := context.Background()
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return fmt.Errorf(" Couldn't create docker client\n%+v", err)
	}

	// start clusters one by one instead of appending all names to the docker command
	// this allows for more granular error handling and logging
	for _, cluster := range clusters {
		log.Printf("Starting cluster [%s]", cluster.name)

		// TODO: consider only touching the registry if it's really in use by a cluster
		// NOTE(review): this lookup/start runs once per cluster iteration, so
		// with --all the shared registry container is (re)started repeatedly.
		registryContainer, err := getRegistryContainer()
		if err != nil {
			log.Warn("Couldn't get registry container, if you know you have one, try starting it manually via `docker start`")
		}
		if registryContainer != "" {
			log.Infof("...Starting registry container '%s'", registryContainer)
			// registry start failures only warn; the cluster start continues
			if err := docker.ContainerStart(ctx, registryContainer, types.ContainerStartOptions{}); err != nil {
				log.Warnf("Failed to start the registry container '%s', try starting it manually via `docker start %s`", registryContainer, registryContainer)
			}
		} else {
			log.Debugln("No registry container found. Proceeding.")
		}

		log.Println("...Starting server")
		// a server that fails to start aborts the whole command
		if err := docker.ContainerStart(ctx, cluster.server.ID, types.ContainerStartOptions{}); err != nil {
			return fmt.Errorf(" Couldn't start server for cluster %s\n%+v", cluster.name, err)
		}

		if len(cluster.workers) > 0 {
			log.Printf("...Starting %d workers\n", len(cluster.workers))
			for _, worker := range cluster.workers {
				// a worker that fails to start is logged but skipped
				if err := docker.ContainerStart(ctx, worker.ID, types.ContainerStartOptions{}); err != nil {
					log.Println(err)
					continue
				}
			}
		}

		log.Printf("SUCCESS: Started cluster [%s]", cluster.name)
	}

	return nil
}
|
||||
|
||||
// ListClusters prints a list of created clusters
|
||||
func ListClusters(c *cli.Context) error {
|
||||
if err := printClusters(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetKubeConfig grabs the kubeconfig from the running cluster and prints the path to stdout
|
||||
func GetKubeConfig(c *cli.Context) error {
|
||||
clusters, err := getClusters(c.Bool("all"), c.String("name"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(clusters) == 0 {
|
||||
if !c.IsSet("all") && c.IsSet("name") {
|
||||
return fmt.Errorf("No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to check other clusters)", c.String("name"))
|
||||
}
|
||||
return fmt.Errorf("No cluster(s) found")
|
||||
}
|
||||
|
||||
for _, cluster := range clusters {
|
||||
kubeConfigPath, err := getKubeConfig(cluster.name, c.Bool("overwrite"))
|
||||
if err != nil {
|
||||
if !c.Bool("all") {
|
||||
return err
|
||||
}
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
|
||||
// output kubeconfig file path to stdout
|
||||
fmt.Println(kubeConfigPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shell starts a new subshell with the KUBECONFIG pointing to the selected cluster
|
||||
func Shell(c *cli.Context) error {
|
||||
return subShell(c.String("name"), c.String("shell"), c.String("command"))
|
||||
}
|
||||
|
||||
// ImportImage saves an image locally and imports it into the k3d containers
|
||||
func ImportImage(c *cli.Context) error {
|
||||
images := make([]string, 0)
|
||||
if strings.Contains(c.Args().First(), ",") {
|
||||
images = append(images, strings.Split(c.Args().First(), ",")...)
|
||||
} else {
|
||||
images = append(images, c.Args()...)
|
||||
}
|
||||
if len(images) == 0 {
|
||||
return fmt.Errorf("No images specified for import")
|
||||
}
|
||||
return importImage(c.String("name"), images, c.Bool("no-remove"))
|
||||
}
|
||||
|
||||
// AddNode adds a node to an existing cluster.
//
// It reads the CLI flags into a ClusterSpec, then either (a) joins the new
// node(s) to an external k3s server when --k3s is set, or (b) inspects the
// running k3d server container to recover the cluster secret, API port and
// network, and creates the node(s) so they auto-join the dockerized cluster.
// Returns an error if the cluster/server cannot be found or inspected, or if
// node creation fails.
func AddNode(c *cli.Context) error {

	/*
	 * (0) Check flags
	 */

	clusterName := c.String("name")
	nodeCount := c.Int("count")

	// start from an explicit zero-valued spec; individual fields are
	// filled in from the flags below
	clusterSpec := &ClusterSpec{
		AgentArgs:          nil,
		APIPort:            apiPort{},
		AutoRestart:        false,
		ClusterName:        clusterName,
		Env:                nil,
		NodeToLabelSpecMap: nil,
		Image:              "",
		NodeToPortSpecMap:  nil,
		PortAutoOffset:     0,
		ServerArgs:         nil,
		Volumes:            &Volumes{},
	}

	/* (0.1)
	 * --role
	 * Role of the node that has to be created.
	 * One of (server|master), (agent|worker)
	 */
	// normalize the role aliases first: worker -> agent, master -> server
	nodeRole := c.String("role")
	if nodeRole == "worker" {
		nodeRole = "agent"
	}
	if nodeRole == "master" {
		nodeRole = "server"
	}

	// TODO: support adding server nodes
	// NOTE(review): "worker" can never reach this check since it was
	// already mapped to "agent" above, so only "agent" passes here.
	if nodeRole != "worker" && nodeRole != "agent" {
		return fmt.Errorf("Adding nodes of type '%s' is not supported", nodeRole)
	}

	/* (0.2)
	 * --image, -i
	 * The k3s image used for the k3d node containers
	 */
	// TODO: use the currently running image by default
	image := c.String("image")
	// if no registry was provided, use the default docker.io
	if len(strings.Split(image, "/")) <= 2 {
		image = fmt.Sprintf("%s/%s", DefaultRegistry, image)
	}
	clusterSpec.Image = image

	/* (0.3)
	 * --env, -e <key1=val1>[,<keyX=valX]
	 * Environment variables that will be passed to the node containers
	 */
	clusterSpec.Env = []string{}
	clusterSpec.Env = append(clusterSpec.Env, c.StringSlice("env")...)

	/* (0.4)
	 * --arg, -x <argument>
	 * Argument passed in to the k3s server/agent command
	 */
	// the same extra arguments are handed to both server and agent nodes
	clusterSpec.ServerArgs = append(clusterSpec.ServerArgs, c.StringSlice("arg")...)
	clusterSpec.AgentArgs = append(clusterSpec.AgentArgs, c.StringSlice("arg")...)

	/* (0.5)
	 * --volume, -v
	 * Add volume mounts
	 */
	volumeSpec, err := NewVolumes(c.StringSlice("volume"))
	if err != nil {
		return err
	}
	// TODO: volumeSpec.DefaultVolumes = append(volumeSpec.DefaultVolumes, "%s:/images", imageVolume.Name)
	clusterSpec.Volumes = volumeSpec

	/* (0.5) BREAKOUT
	 * --k3s <url>
	 * Connect to a non-dockerized k3s server
	 */

	if c.IsSet("k3s") {
		// external-server path: create the network and hand off; none of
		// the container inspection below applies in this mode
		log.Infof("Adding %d %s-nodes to k3s cluster %s...\n", nodeCount, nodeRole, c.String("k3s"))
		if _, err := createClusterNetwork(clusterName); err != nil {
			return err
		}
		if err := addNodeToK3s(c, clusterSpec, nodeRole); err != nil {
			return err
		}
		return nil
	}

	/*
	 * (1) Check cluster
	 */

	ctx := context.Background()
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Errorln("Failed to create docker client")
		return err
	}

	// NOTE: the local variable shadows the imported `filters` package from
	// here on; the same filter set is reused (and mutated) for all queries
	filters := filters.NewArgs()
	filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))
	filters.Add("label", "app=k3d")

	/*
	 * (1.1) Verify, that the cluster (i.e. the server) that we want to connect to, is running
	 */
	filters.Add("label", "component=server")

	serverList, err := docker.ContainerList(ctx, types.ContainerListOptions{
		Filters: filters,
	})
	if err != nil || len(serverList) == 0 {
		// NOTE(review): when the list is merely empty, err is nil and this
		// returns nil after logging — callers may see a silent no-op; verify
		log.Errorf("Failed to get server container for cluster '%s'", clusterName)
		return err
	}

	/*
	 * (1.2) Extract cluster information from server container
	 */
	serverContainer, err := docker.ContainerInspect(ctx, serverList[0].ID)
	if err != nil {
		log.Errorf("Failed to inspect server container '%s' to get cluster secret", serverList[0].ID)
		return err
	}

	/*
	 * (1.2.1) Extract cluster secret from server container's labels
	 */
	// scan the server's environment for K3S_CLUSTER_SECRET=... and reuse it
	// verbatim so the new node can join
	clusterSecretEnvVar := ""
	for _, envVar := range serverContainer.Config.Env {
		if envVarSplit := strings.SplitN(envVar, "=", 2); envVarSplit[0] == "K3S_CLUSTER_SECRET" {
			clusterSecretEnvVar = envVar
		}
	}
	if clusterSecretEnvVar == "" {
		return fmt.Errorf("Failed to get cluster secret from server container")
	}

	clusterSpec.Env = append(clusterSpec.Env, clusterSecretEnvVar)

	/*
	 * (1.2.2) Extract API server Port from server container's cmd
	 */
	// the port is the argument following "--https-listen-port" in the
	// server container's command line
	serverListenPort := ""
	for cmdIndex, cmdPart := range serverContainer.Config.Cmd {
		if cmdPart == "--https-listen-port" {
			serverListenPort = serverContainer.Config.Cmd[cmdIndex+1]
		}
	}
	if serverListenPort == "" {
		return fmt.Errorf("Failed to get https-listen-port from server container")
	}

	// docker prefixes container names with "/", which must not appear in the URL
	serverURLEnvVar := fmt.Sprintf("K3S_URL=https://%s:%s", strings.TrimLeft(serverContainer.Name, "/"), serverListenPort)
	clusterSpec.Env = append(clusterSpec.Env, serverURLEnvVar)

	/*
	 * (1.3) Get the docker network of the cluster that we want to connect to
	 */
	// drop the server-only filter so the network query matches the cluster network
	filters.Del("label", "component=server")

	// networkList is only used to verify the network exists; the nodes are
	// attached by name elsewhere
	networkList, err := docker.NetworkList(ctx, types.NetworkListOptions{
		Filters: filters,
	})
	if err != nil || len(networkList) == 0 {
		log.Errorf("Failed to find network for cluster '%s'", clusterName)
		return err
	}

	/*
	 * (2) Now identify any existing worker nodes IF we're adding a new one
	 */
	highestExistingWorkerSuffix := 0 // needs to be outside conditional because of bad branching

	if nodeRole == "agent" {
		filters.Add("label", "component=worker")

		// include stopped workers too (All: true) so name suffixes don't collide
		workerList, err := docker.ContainerList(ctx, types.ContainerListOptions{
			Filters: filters,
			All:     true,
		})
		if err != nil {
			log.Errorln("Failed to list worker node containers")
			return err
		}

		// worker names end in "-<n>"; track the highest n seen so far
		for _, worker := range workerList {
			split := strings.Split(worker.Names[0], "-")
			currSuffix, err := strconv.Atoi(split[len(split)-1])
			if err != nil {
				log.Errorln("Failed to get highest worker suffix")
				return err
			}
			if currSuffix > highestExistingWorkerSuffix {
				highestExistingWorkerSuffix = currSuffix
			}
		}
	}

	/*
	 * (3) Create the nodes with configuration that automatically joins them to the cluster
	 */

	log.Infof("Adding %d %s-nodes to k3d cluster %s...\n", nodeCount, nodeRole, clusterName)

	if err := createNodes(clusterSpec, nodeRole, highestExistingWorkerSuffix+1, nodeCount); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func addNodeToK3s(c *cli.Context, clusterSpec *ClusterSpec, nodeRole string) error {
|
||||
|
||||
k3sURLEnvVar := fmt.Sprintf("K3S_URL=%s", c.String("k3s"))
|
||||
k3sConnSecretEnvVar := fmt.Sprintf("K3S_CLUSTER_SECRET=%s", c.String("k3s-secret"))
|
||||
if c.IsSet("k3s-token") {
|
||||
k3sConnSecretEnvVar = fmt.Sprintf("K3S_TOKEN=%s", c.String("k3s-token"))
|
||||
}
|
||||
|
||||
clusterSpec.Env = append(clusterSpec.Env, k3sURLEnvVar, k3sConnSecretEnvVar)
|
||||
|
||||
if err := createNodes(clusterSpec, nodeRole, 0, c.Int("count")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createNodes helps creating multiple nodes at once with an incrementing suffix in the name
|
||||
func createNodes(clusterSpec *ClusterSpec, role string, suffixNumberStart int, count int) error {
|
||||
for suffix := suffixNumberStart; suffix < suffixNumberStart+count; suffix++ {
|
||||
containerID := ""
|
||||
var err error
|
||||
if role == "agent" {
|
||||
containerID, err = createWorker(clusterSpec, suffix)
|
||||
} else if role == "server" {
|
||||
containerID, err = createServer(clusterSpec)
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create %s-node", role)
|
||||
return err
|
||||
}
|
||||
log.Infof("Created %s-node with ID %s", role, containerID)
|
||||
}
|
||||
return nil
|
||||
}
|
384
cli/container.go
Normal file
384
cli/container.go
Normal file
@ -0,0 +1,384 @@
|
||||
package run
|
||||
|
||||
/*
|
||||
* The functions in this file take care of spinning up the
|
||||
* k3s server and worker containers as well as deleting them.
|
||||
*/
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (string, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
resp, err := docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
|
||||
if client.IsErrNotFound(err) {
|
||||
log.Printf("Pulling image %s...\n", config.Image)
|
||||
reader, err := docker.ImagePull(ctx, config.Image, types.ImagePullOptions{})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Couldn't pull image %s\n%+v", config.Image, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
if ll := log.GetLevel(); ll == log.DebugLevel {
|
||||
_, err := io.Copy(os.Stdout, reader)
|
||||
if err != nil {
|
||||
log.Warningf("Couldn't get docker output\n%+v", err)
|
||||
}
|
||||
} else {
|
||||
_, err := io.Copy(ioutil.Discard, reader)
|
||||
if err != nil {
|
||||
log.Warningf("Couldn't get docker output\n%+v", err)
|
||||
}
|
||||
}
|
||||
resp, err = docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't create container after pull %s\n%+v", containerName, err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't create container %s\n%+v", containerName, err)
|
||||
}
|
||||
|
||||
return resp.ID, nil
|
||||
}
|
||||
|
||||
func startContainer(ID string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf("Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
if err := docker.ContainerStart(ctx, ID, types.ContainerStartOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createServer creates and starts the k3s server container for a cluster and
// returns its container ID. It assembles labels, published ports (including
// the API port), volumes and network aliases from the given ClusterSpec.
func createServer(spec *ClusterSpec) (string, error) {
	log.Printf("Creating server using %s...\n", spec.Image)

	// base labels used to find k3d containers again later (filters on
	// app/component/cluster elsewhere rely on these)
	containerLabels := make(map[string]string)
	containerLabels["app"] = "k3d"
	containerLabels["component"] = "server"
	containerLabels["created"] = time.Now().Format("2006-01-02 15:04:05")
	containerLabels["cluster"] = spec.ClusterName

	containerName := GetContainerName("server", spec.ClusterName, -1)

	// labels to be created to the server belong to roles
	// all, server, master or <server-container-name>
	serverLabels, err := MergeLabelSpecs(spec.NodeToLabelSpecMap, "server", containerName)
	if err != nil {
		return "", err
	}
	containerLabels = MergeLabels(containerLabels, serverLabels)

	// ports to be assigned to the server belong to roles
	// all, server, master or <server-container-name>
	serverPorts, err := MergePortSpecs(spec.NodeToPortSpecMap, "server", containerName)
	if err != nil {
		return "", err
	}

	// default: bind the API port on all interfaces; an explicit APIPort.Host
	// overrides both the bind IP and the advertised host label
	hostIP := "0.0.0.0"
	containerLabels["apihost"] = "localhost"
	if spec.APIPort.Host != "" {
		hostIP = spec.APIPort.HostIP
		containerLabels["apihost"] = spec.APIPort.Host
	}

	apiPortSpec := fmt.Sprintf("%s:%s:%s/tcp", hostIP, spec.APIPort.Port, spec.APIPort.Port)

	serverPorts = append(serverPorts, apiPortSpec)

	serverPublishedPorts, err := CreatePublishedPorts(serverPorts)
	if err != nil {
		// NOTE(review): log.Fatalf exits the process here instead of
		// returning the error like the other failure paths — confirm intended
		log.Fatalf("Error: failed to parse port specs %+v \n%+v", serverPorts, err)
	}

	hostConfig := &container.HostConfig{
		PortBindings: serverPublishedPorts.PortBindings,
		Privileged:   true,
		// Init: pointer-to-true via a one-element slice literal
		Init: &[]bool{true}[0],
	}

	if spec.AutoRestart {
		hostConfig.RestartPolicy.Name = "unless-stopped"
	}

	spec.Volumes.addVolumesToHostConfig(containerName, "server", hostConfig)

	// attach the server to the cluster network with its name as alias so
	// workers can reach it by hostname
	networkingConfig := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			k3dNetworkName(spec.ClusterName): {
				Aliases: []string{containerName},
			},
		},
	}

	config := &container.Config{
		Hostname:     containerName,
		Image:        spec.Image,
		Cmd:          append([]string{"server"}, spec.ServerArgs...),
		ExposedPorts: serverPublishedPorts.ExposedPorts,
		Env:          spec.Env,
		Labels:       containerLabels,
	}
	id, err := createContainer(config, hostConfig, networkingConfig, containerName)
	if err != nil {
		return "", fmt.Errorf(" Couldn't create container %s\n%+v", containerName, err)
	}

	// copy the registry configuration
	if spec.RegistryEnabled || len(spec.RegistriesFile) > 0 {
		if err := writeRegistriesConfigInContainer(spec, id); err != nil {
			return "", err
		}
	}

	if err := startContainer(id); err != nil {
		return "", fmt.Errorf(" Couldn't start container %s\n%+v", containerName, err)
	}

	return id, nil
}
|
||||
|
||||
// createWorker creates/starts a k3s agent node that connects to the server
//
// The worker is labeled for later discovery, gets a K3S_URL pointing at the
// cluster's server container unless one is already present in spec.Env, and
// is attached to the cluster network. Returns the new container's ID.
func createWorker(spec *ClusterSpec, postfix int) (string, error) {
	containerLabels := make(map[string]string)
	containerLabels["app"] = "k3d"
	containerLabels["component"] = "worker"
	containerLabels["created"] = time.Now().Format("2006-01-02 15:04:05")
	containerLabels["cluster"] = spec.ClusterName

	containerName := GetContainerName("worker", spec.ClusterName, postfix)
	env := spec.Env

	// only synthesize K3S_URL if the caller didn't already provide one
	// (e.g. when joining an external k3s server)
	needServerURL := true
	for _, envVar := range env {
		if strings.Split(envVar, "=")[0] == "K3S_URL" {
			needServerURL = false
			break
		}
	}
	if needServerURL {
		env = append(spec.Env, fmt.Sprintf("K3S_URL=https://k3d-%s-server:%s", spec.ClusterName, spec.APIPort.Port))
	}

	// labels to be created to the worker belong to roles
	// all, worker, agent or <server-container-name>
	workerLabels, err := MergeLabelSpecs(spec.NodeToLabelSpecMap, "worker", containerName)
	if err != nil {
		return "", err
	}
	containerLabels = MergeLabels(containerLabels, workerLabels)

	// ports to be assigned to the worker belong to roles
	// all, worker, agent or <server-container-name>
	workerPorts, err := MergePortSpecs(spec.NodeToPortSpecMap, "worker", containerName)
	if err != nil {
		return "", err
	}
	workerPublishedPorts, err := CreatePublishedPorts(workerPorts)
	if err != nil {
		return "", err
	}
	if spec.PortAutoOffset > 0 {
		// TODO: add some checks before to print a meaningful log message saying that we cannot map multiple container ports
		// to the same host port without a offset
		workerPublishedPorts = workerPublishedPorts.Offset(postfix + spec.PortAutoOffset)
	}

	hostConfig := &container.HostConfig{
		// k3s expects writable /run and /var/run; tmpfs keeps them ephemeral
		Tmpfs: map[string]string{
			"/run":     "",
			"/var/run": "",
		},
		PortBindings: workerPublishedPorts.PortBindings,
		Privileged:   true,
		// Init: pointer-to-true via a one-element slice literal
		Init: &[]bool{true}[0],
	}

	if spec.AutoRestart {
		hostConfig.RestartPolicy.Name = "unless-stopped"
	}

	spec.Volumes.addVolumesToHostConfig(containerName, "worker", hostConfig)

	// attach the worker to the cluster network with its name as alias
	networkingConfig := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			k3dNetworkName(spec.ClusterName): {
				Aliases: []string{containerName},
			},
		},
	}

	config := &container.Config{
		Hostname:     containerName,
		Image:        spec.Image,
		Env:          env,
		Cmd:          append([]string{"agent"}, spec.AgentArgs...),
		Labels:       containerLabels,
		ExposedPorts: workerPublishedPorts.ExposedPorts,
	}

	id, err := createContainer(config, hostConfig, networkingConfig, containerName)
	if err != nil {
		return "", fmt.Errorf(" Couldn't create container %s\n%+v", containerName, err)
	}

	// copy the registry configuration
	if spec.RegistryEnabled || len(spec.RegistriesFile) > 0 {
		if err := writeRegistriesConfigInContainer(spec, id); err != nil {
			return "", err
		}
	}

	if err := startContainer(id); err != nil {
		return "", fmt.Errorf(" Couldn't start container %s\n%+v", containerName, err)
	}
	return id, nil
}
|
||||
|
||||
// removeContainer tries to rm a container, selected by Docker ID, and does a rm -f if it fails (e.g. if container is still running)
|
||||
func removeContainer(ID string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
options := types.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
}
|
||||
|
||||
if err := docker.ContainerRemove(ctx, ID, options); err != nil {
|
||||
return fmt.Errorf(" Couldn't delete container [%s] -> %+v", ID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getContainerNetworks returns the networks a container is connected to
|
||||
func getContainerNetworks(ID string) (map[string]*network.EndpointSettings, error) {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c, err := docker.ContainerInspect(ctx, ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(" Couldn't get details about container %s: %w", ID, err)
|
||||
}
|
||||
return c.NetworkSettings.Networks, nil
|
||||
}
|
||||
|
||||
// connectContainerToNetwork connects a container to a given network
|
||||
func connectContainerToNetwork(ID string, networkID string, aliases []string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
networkingConfig := &network.EndpointSettings{
|
||||
Aliases: aliases,
|
||||
}
|
||||
|
||||
return docker.NetworkConnect(ctx, networkID, ID, networkingConfig)
|
||||
}
|
||||
|
||||
// disconnectContainerFromNetwork disconnects a container from a given network
|
||||
func disconnectContainerFromNetwork(ID string, networkID string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
return docker.NetworkDisconnect(ctx, networkID, ID, false)
|
||||
}
|
||||
|
||||
func waitForContainerLogMessage(containerID string, message string, timeoutSeconds int) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
timeout := time.Duration(timeoutSeconds) * time.Second
|
||||
for {
|
||||
// not running after timeout exceeded? Rollback and delete everything.
|
||||
if timeout != 0 && time.Now().After(start.Add(timeout)) {
|
||||
return fmt.Errorf("ERROR: timeout of %d seconds exceeded while waiting for log message '%s'", timeoutSeconds, message)
|
||||
}
|
||||
|
||||
// scan container logs for a line that tells us that the required services are up and running
|
||||
out, err := docker.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
|
||||
if err != nil {
|
||||
out.Close()
|
||||
return fmt.Errorf("ERROR: couldn't get docker logs from container %s\n%+v", containerID, err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
nRead, _ := buf.ReadFrom(out)
|
||||
out.Close()
|
||||
output := buf.String()
|
||||
if nRead > 0 && strings.Contains(string(output), message) {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyToContainer(ID string, dstPath string, content []byte) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buf)
|
||||
hdr := &tar.Header{Name: dstPath, Mode: 0644, Size: int64(len(content))}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write a tar header")
|
||||
}
|
||||
if _, err := tw.Write(content); err != nil {
|
||||
return errors.Wrap(err, "failed to write a tar body")
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
return errors.Wrap(err, "failed to close tar archive")
|
||||
}
|
||||
|
||||
r := bytes.NewReader(buf.Bytes())
|
||||
if err := docker.CopyToContainer(ctx, ID, "/", r, types.CopyToContainerOptions{AllowOverwriteDirWithFile: true}); err != nil {
|
||||
return errors.Wrap(err, "failed to copy source code")
|
||||
}
|
||||
return nil
|
||||
}
|
36
cli/docker-machine.go
Normal file
36
cli/docker-machine.go
Normal file
@ -0,0 +1,36 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func getDockerMachineIp() (string, error) {
|
||||
machine := os.ExpandEnv("$DOCKER_MACHINE_NAME")
|
||||
|
||||
if machine == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
dockerMachinePath, err := exec.LookPath("docker-machine")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
out, err := exec.Command(dockerMachinePath, "ip", machine).Output()
|
||||
if err != nil {
|
||||
log.Printf("Error executing 'docker-machine ip'")
|
||||
|
||||
if exitError, ok := err.(*exec.ExitError); ok {
|
||||
log.Printf("%s", string(exitError.Stderr))
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
ipStr := strings.TrimSuffix(string(out), "\n")
|
||||
ipStr = strings.TrimSuffix(ipStr, "\r")
|
||||
|
||||
return ipStr, nil
|
||||
}
|
215
cli/image.go
Normal file
215
cli/image.go
Normal file
@ -0,0 +1,215 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
imageBasePathRemote = "/images"
|
||||
k3dToolsImage = "docker.io/iwilltry42/k3d-tools:v0.0.1"
|
||||
)
|
||||
|
||||
// importImage transfers locally available docker images into all nodes of a
// k3d cluster. It works in two phases: (1) a helper "tools" container saves
// the images as a tarball into the cluster's shared image volume, then
// (2) `ctr image import` is executed inside every node container to load the
// tarball. Unless noRemove is set, the tarball is deleted from the server
// container afterwards.
func importImage(clusterName string, images []string, noRemove bool) error {
	// get a docker client
	ctx := context.Background()
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return fmt.Errorf(" Couldn't create docker client\n%+v", err)
	}

	// get cluster directory to temporarily save the image tarball there
	imageVolume, err := getImageVolume(clusterName)
	if err != nil {
		return fmt.Errorf(" Couldn't get image volume for cluster [%s]\n%+v", clusterName, err)
	}

	//*** first, save the images using the local docker daemon
	log.Infof("Saving images %s from local docker daemon...", images)
	toolsContainerName := fmt.Sprintf("k3d-%s-tools", clusterName)
	// timestamped name avoids collisions with leftover tarballs
	tarFileName := fmt.Sprintf("%s/k3d-%s-images-%s.tar", imageBasePathRemote, clusterName, time.Now().Format("20060102150405"))

	// create a tools container to get the tarball into the named volume
	containerConfig := container.Config{
		Hostname: toolsContainerName,
		Image:    k3dToolsImage,
		Labels: map[string]string{
			"app":       "k3d",
			"cluster":   clusterName,
			"component": "tools",
		},
		Cmd:          append([]string{"save-image", "-d", tarFileName}, images...),
		AttachStdout: true,
		AttachStderr: true,
	}
	// the tools container needs the host docker socket (to save images) and
	// the shared image volume (to store the tarball)
	hostConfig := container.HostConfig{
		Binds: []string{
			"/var/run/docker.sock:/var/run/docker.sock",
			fmt.Sprintf("%s:%s:rw", imageVolume.Name, imageBasePathRemote),
		},
	}

	toolsContainerID, err := createContainer(&containerConfig, &hostConfig, &network.NetworkingConfig{}, toolsContainerName)
	if err != nil {
		return err
	}
	if err := startContainer(toolsContainerID); err != nil {
		return fmt.Errorf(" Couldn't start container %s\n%w", toolsContainerName, err)
	}

	// always clean up the helper container, even on failure
	defer func() {
		if err = docker.ContainerRemove(ctx, toolsContainerID, types.ContainerRemoveOptions{
			Force: true,
		}); err != nil {
			log.Warningf("Couldn't remove tools container\n%+v", err)
		}
	}()

	// loop to wait for tools container to exit (failed or successfully saved images)
	for {
		cont, err := docker.ContainerInspect(ctx, toolsContainerID)
		if err != nil {
			return fmt.Errorf(" Couldn't get helper container's exit code\n%+v", err)
		}
		if !cont.State.Running { // container finished...
			if cont.State.ExitCode == 0 { // ...successfully
				log.Info("Saved images to shared docker volume")
				break
			} else if cont.State.ExitCode != 0 { // ...failed
				errTxt := "Helper container failed to save images"
				logReader, err := docker.ContainerLogs(ctx, toolsContainerID, types.ContainerLogsOptions{
					ShowStdout: true,
					ShowStderr: true,
				})
				if err != nil {
					return fmt.Errorf("%s\n> couldn't get logs from helper container\n%+v", errTxt, err)
				}
				logs, err := ioutil.ReadAll(logReader) // let's show some logs indicating what happened
				if err != nil {
					return fmt.Errorf("%s\n> couldn't get logs from helper container\n%+v", errTxt, err)
				}
				return fmt.Errorf("%s -> Logs from [%s]:\n>>>>>>\n%s\n<<<<<<", errTxt, toolsContainerName, string(logs))
			}
		}
		time.Sleep(time.Second / 2) // wait for half a second so we don't spam the docker API too much
	}

	// Get the container IDs for all containers in the cluster
	clusters, err := getClusters(false, clusterName)
	if err != nil {
		return fmt.Errorf(" Couldn't get cluster by name [%s]\n%+v", clusterName, err)
	}
	containerList := []types.Container{clusters[clusterName].server}
	containerList = append(containerList, clusters[clusterName].workers...)

	// *** second, import the images using ctr in the k3d nodes

	// create exec configuration
	cmd := []string{"ctr", "image", "import", tarFileName}
	execConfig := types.ExecConfig{
		AttachStderr: true,
		AttachStdout: true,
		Cmd:          cmd,
		Tty:          true,
		Detach:       true,
	}

	execAttachConfig := types.ExecConfig{
		Tty: true,
	}

	execStartConfig := types.ExecStartCheck{
		Tty: true,
	}

	// import in each node separately
	// TODO: import concurrently using goroutines or find a way to share the image cache
	for _, container := range containerList {

		containerName := container.Names[0][1:] // trimming the leading "/" from name
		log.Infof("Importing images %s in container [%s]", images, containerName)

		// create exec configuration
		execResponse, err := docker.ContainerExecCreate(ctx, container.ID, execConfig)
		if err != nil {
			return fmt.Errorf("Failed to create exec command for container [%s]\n%+v", containerName, err)
		}

		// attach to exec process in container
		containerConnection, err := docker.ContainerExecAttach(ctx, execResponse.ID, types.ExecStartCheck{
			Detach: execAttachConfig.Detach,
			Tty:    execAttachConfig.Tty,
		})
		if err != nil {
			return fmt.Errorf(" Couldn't attach to container [%s]\n%+v", containerName, err)
		}
		// NOTE(review): defer inside the loop keeps all connections open
		// until the function returns — fine for small clusters, but worth
		// confirming for clusters with many nodes
		defer containerConnection.Close()

		// start exec
		err = docker.ContainerExecStart(ctx, execResponse.ID, execStartConfig)
		if err != nil {
			return fmt.Errorf(" Couldn't execute command in container [%s]\n%+v", containerName, err)
		}

		// get output from container
		content, err := ioutil.ReadAll(containerConnection.Reader)
		if err != nil {
			return fmt.Errorf(" Couldn't read output from container [%s]\n%+v", containerName, err)
		}

		// example output "unpacking image........ ...done"
		if !strings.Contains(string(content), "done") {
			return fmt.Errorf("seems like something went wrong using `ctr image import` in container [%s]. Full output below:\n%s", containerName, string(content))
		}
	}

	log.Infof("Successfully imported images %s in all nodes of cluster [%s]", images, clusterName)

	// remove tarball from inside the server container
	if !noRemove {
		log.Info("Cleaning up tarball")

		execID, err := docker.ContainerExecCreate(ctx, clusters[clusterName].server.ID, types.ExecConfig{
			Cmd: []string{"rm", "-f", tarFileName},
		})
		if err != nil {
			log.Warningf("Failed to delete tarball: couldn't create remove in container [%s]\n%+v", clusters[clusterName].server.ID, err)
		}
		err = docker.ContainerExecStart(ctx, execID.ID, types.ExecStartCheck{
			Detach: true,
		})
		if err != nil {
			log.Warningf("Couldn't start tarball deletion action\n%+v", err)
		}

		// poll the exec until the rm command has finished
		for {
			execInspect, err := docker.ContainerExecInspect(ctx, execID.ID)
			if err != nil {
				log.Warningf("Couldn't verify deletion of tarball\n%+v", err)
			}

			if !execInspect.Running {
				if execInspect.ExitCode == 0 {
					log.Info("Deleted tarball")
					break
				} else {
					log.Warning("Failed to delete tarball")
					break
				}
			}
		}
	}

	log.Info("...Done")

	return nil
}
|
121
cli/label.go
Normal file
121
cli/label.go
Normal file
@ -0,0 +1,121 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// mapNodesToLabelSpecs maps nodes to labelSpecs
|
||||
func mapNodesToLabelSpecs(specs []string, createdNodes []string) (map[string][]string, error) {
|
||||
// check node-specifier possibilitites
|
||||
possibleNodeSpecifiers := []string{"all", "workers", "agents", "server", "master"}
|
||||
possibleNodeSpecifiers = append(possibleNodeSpecifiers, createdNodes...)
|
||||
|
||||
nodeToLabelSpecMap := make(map[string][]string)
|
||||
|
||||
for _, spec := range specs {
|
||||
labelSpec, node := extractLabelNode(spec)
|
||||
|
||||
// check if node-specifier is valid (either a role or a name) and append to list if matches
|
||||
nodeFound := false
|
||||
for _, name := range possibleNodeSpecifiers {
|
||||
if node == name {
|
||||
nodeFound = true
|
||||
nodeToLabelSpecMap[node] = append(nodeToLabelSpecMap[node], labelSpec)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// node extraction was a false positive, use full spec with default node
|
||||
if !nodeFound {
|
||||
nodeToLabelSpecMap[defaultLabelNodes] = append(nodeToLabelSpecMap[defaultLabelNodes], spec)
|
||||
}
|
||||
}
|
||||
|
||||
return nodeToLabelSpecMap, nil
|
||||
}
|
||||
|
||||
// extractLabelNode separates the node specification from the actual label specs
|
||||
func extractLabelNode(spec string) (string, string) {
|
||||
// label defaults to full spec
|
||||
labelSpec := spec
|
||||
|
||||
// node defaults to "all"
|
||||
node := defaultLabelNodes
|
||||
|
||||
// only split at the last "@"
|
||||
re := regexp.MustCompile(`^(.*)@([^@]+)$`)
|
||||
match := re.FindStringSubmatch(spec)
|
||||
|
||||
if len(match) > 0 {
|
||||
labelSpec = match[1]
|
||||
node = match[2]
|
||||
}
|
||||
|
||||
return labelSpec, node
|
||||
}
|
||||
|
||||
// splitLabel separates a label into its key and value parts. The split happens
// at the first "=" only (mirroring `docker run` semantics); a label without
// "=" yields the whole string as key and an empty value.
func splitLabel(label string) (string, string) {
	if idx := strings.Index(label, "="); idx >= 0 {
		return label[:idx], label[idx+1:]
	}
	// no "=" present: key with empty value (like `docker run` does)
	return label, ""
}
|
||||
|
||||
// MergeLabelSpecs merges labels for a given node
|
||||
func MergeLabelSpecs(nodeToLabelSpecMap map[string][]string, role, name string) ([]string, error) {
|
||||
labelSpecs := []string{}
|
||||
|
||||
// add portSpecs according to node role
|
||||
for _, group := range nodeRuleGroupsMap[role] {
|
||||
for _, v := range nodeToLabelSpecMap[group] {
|
||||
exists := false
|
||||
for _, i := range labelSpecs {
|
||||
if v == i {
|
||||
exists = true
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
labelSpecs = append(labelSpecs, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add portSpecs according to node name
|
||||
for _, v := range nodeToLabelSpecMap[name] {
|
||||
exists := false
|
||||
for _, i := range labelSpecs {
|
||||
if v == i {
|
||||
exists = true
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
labelSpecs = append(labelSpecs, v)
|
||||
}
|
||||
}
|
||||
|
||||
return labelSpecs, nil
|
||||
}
|
||||
|
||||
// MergeLabels merges list of labels into a label map
|
||||
func MergeLabels(labelMap map[string]string, labels []string) map[string]string {
|
||||
for _, label := range labels {
|
||||
labelKey, labelValue := splitLabel(label)
|
||||
|
||||
if _, found := labelMap[labelKey]; found {
|
||||
log.Warningf("Overriding already existing label [%s]", labelKey)
|
||||
}
|
||||
|
||||
labelMap[labelKey] = labelValue
|
||||
}
|
||||
|
||||
return labelMap
|
||||
}
|
120
cli/network.go
Normal file
120
cli/network.go
Normal file
@ -0,0 +1,120 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// k3dNetworkName derives the docker network name for a cluster by prefixing
// the cluster name with "k3d-".
func k3dNetworkName(clusterName string) string {
	return "k3d-" + clusterName
}
|
||||
|
||||
// createClusterNetwork creates a docker network for a cluster that will be used
|
||||
// to let the server and worker containers communicate with each other easily.
|
||||
func createClusterNetwork(clusterName string) (string, error) {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
args := filters.NewArgs()
|
||||
args.Add("label", "app=k3d")
|
||||
args.Add("label", "cluster="+clusterName)
|
||||
nl, err := docker.NetworkList(ctx, types.NetworkListOptions{Filters: args})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed to list networks\n%+v", err)
|
||||
}
|
||||
|
||||
if len(nl) > 1 {
|
||||
log.Warningf("Found %d networks for %s when we only expect 1\n", len(nl), clusterName)
|
||||
}
|
||||
|
||||
if len(nl) > 0 {
|
||||
return nl[0].ID, nil
|
||||
}
|
||||
|
||||
// create the network with a set of labels and the cluster name as network name
|
||||
resp, err := docker.NetworkCreate(ctx, k3dNetworkName(clusterName), types.NetworkCreate{
|
||||
Labels: map[string]string{
|
||||
"app": "k3d",
|
||||
"cluster": clusterName,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't create network\n%+v", err)
|
||||
}
|
||||
|
||||
return resp.ID, nil
|
||||
}
|
||||
|
||||
func getClusterNetwork(clusterName string) (string, error) {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "app=k3d")
|
||||
filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))
|
||||
|
||||
networks, err := docker.NetworkList(ctx, types.NetworkListOptions{
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't find network for cluster %s\n%+v", clusterName, err)
|
||||
}
|
||||
if len(networks) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
// there should be only one network that matches the name... but who knows?
|
||||
return networks[0].ID, nil
|
||||
}
|
||||
|
||||
// deleteClusterNetwork deletes a docker network based on the name of a cluster it belongs to
|
||||
func deleteClusterNetwork(clusterName string) error {
|
||||
nid, err := getClusterNetwork(clusterName)
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't find network for cluster %s\n%+v", clusterName, err)
|
||||
}
|
||||
if nid == "" {
|
||||
log.Warningf("couldn't remove network for cluster %s: network does not exist", clusterName)
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
if err := docker.NetworkRemove(ctx, nid); err != nil {
|
||||
log.Warningf("couldn't remove network for cluster %s\n%+v", clusterName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getContainersInNetwork gets a list of containers connected to a network
|
||||
func getContainersInNetwork(nid string) ([]string, error) {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
options := types.NetworkInspectOptions{}
|
||||
network, err := docker.NetworkInspect(ctx, nid, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cids := []string{}
|
||||
for cid := range network.Containers {
|
||||
cids = append(cids, cid)
|
||||
}
|
||||
return cids, nil
|
||||
}
|
188
cli/port.go
Normal file
188
cli/port.go
Normal file
@ -0,0 +1,188 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// mapNodesToPortSpecs maps nodes to portSpecs
|
||||
func mapNodesToPortSpecs(specs []string, createdNodes []string) (map[string][]string, error) {
|
||||
|
||||
if err := validatePortSpecs(specs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check node-specifier possibilitites
|
||||
possibleNodeSpecifiers := []string{"all", "workers", "agents", "server", "master"}
|
||||
possibleNodeSpecifiers = append(possibleNodeSpecifiers, createdNodes...)
|
||||
|
||||
nodeToPortSpecMap := make(map[string][]string)
|
||||
|
||||
for _, spec := range specs {
|
||||
nodes, portSpec := extractNodes(spec)
|
||||
|
||||
if len(nodes) == 0 {
|
||||
nodes = append(nodes, defaultNodes)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
// check if node-specifier is valid (either a role or a name) and append to list if matches
|
||||
nodeFound := false
|
||||
for _, name := range possibleNodeSpecifiers {
|
||||
if node == name {
|
||||
nodeFound = true
|
||||
nodeToPortSpecMap[node] = append(nodeToPortSpecMap[node], portSpec)
|
||||
break
|
||||
}
|
||||
}
|
||||
if !nodeFound {
|
||||
log.Warningf("Unknown node-specifier [%s] in port mapping entry [%s]", node, spec)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nodeToPortSpecMap, nil
|
||||
}
|
||||
|
||||
// CreatePublishedPorts is the factory function for PublishedPorts
|
||||
func CreatePublishedPorts(specs []string) (*PublishedPorts, error) {
|
||||
if len(specs) == 0 {
|
||||
var newExposedPorts = make(map[nat.Port]struct{}, 1)
|
||||
var newPortBindings = make(map[nat.Port][]nat.PortBinding, 1)
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}, nil
|
||||
}
|
||||
|
||||
newExposedPorts, newPortBindings, err := nat.ParsePortSpecs(specs)
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}, err
|
||||
}
|
||||
|
||||
// validatePortSpecs matches the provided port specs against a set of rules to enable early exit if something is wrong
|
||||
func validatePortSpecs(specs []string) error {
|
||||
for _, spec := range specs {
|
||||
atSplit := strings.Split(spec, "@")
|
||||
_, err := nat.ParsePortSpec(atSplit[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid port specification [%s] in port mapping [%s]\n%+v", atSplit[0], spec, err)
|
||||
}
|
||||
if len(atSplit) > 0 {
|
||||
for i := 1; i < len(atSplit); i++ {
|
||||
if err := ValidateHostname(atSplit[i]); err != nil {
|
||||
return fmt.Errorf("Invalid node-specifier [%s] in port mapping [%s]\n%+v", atSplit[i], spec, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractNodes separates the node specification from the actual port specs
|
||||
func extractNodes(spec string) ([]string, string) {
|
||||
// extract nodes
|
||||
nodes := []string{}
|
||||
atSplit := strings.Split(spec, "@")
|
||||
portSpec := atSplit[0]
|
||||
if len(atSplit) > 1 {
|
||||
nodes = atSplit[1:]
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
nodes = append(nodes, defaultNodes)
|
||||
}
|
||||
return nodes, portSpec
|
||||
}
|
||||
|
||||
// Offset creates a new PublishedPort structure, with all host ports are changed by a fixed 'offset'
|
||||
func (p PublishedPorts) Offset(offset int) *PublishedPorts {
|
||||
var newExposedPorts = make(map[nat.Port]struct{}, len(p.ExposedPorts))
|
||||
var newPortBindings = make(map[nat.Port][]nat.PortBinding, len(p.PortBindings))
|
||||
|
||||
for k, v := range p.ExposedPorts {
|
||||
newExposedPorts[k] = v
|
||||
}
|
||||
|
||||
for k, v := range p.PortBindings {
|
||||
bindings := make([]nat.PortBinding, len(v))
|
||||
for i, b := range v {
|
||||
port, _ := nat.ParsePort(b.HostPort)
|
||||
bindings[i].HostIP = b.HostIP
|
||||
bindings[i].HostPort = fmt.Sprintf("%d", port*offset)
|
||||
}
|
||||
newPortBindings[k] = bindings
|
||||
}
|
||||
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}
|
||||
}
|
||||
|
||||
// AddPort creates a new PublishedPort struct with one more port, based on 'portSpec'
|
||||
func (p *PublishedPorts) AddPort(portSpec string) (*PublishedPorts, error) {
|
||||
portMappings, err := nat.ParsePortSpec(portSpec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var newExposedPorts = make(map[nat.Port]struct{}, len(p.ExposedPorts)+1)
|
||||
var newPortBindings = make(map[nat.Port][]nat.PortBinding, len(p.PortBindings)+1)
|
||||
|
||||
// Populate the new maps
|
||||
for k, v := range p.ExposedPorts {
|
||||
newExposedPorts[k] = v
|
||||
}
|
||||
|
||||
for k, v := range p.PortBindings {
|
||||
newPortBindings[k] = v
|
||||
}
|
||||
|
||||
// Add new ports
|
||||
for _, portMapping := range portMappings {
|
||||
port := portMapping.Port
|
||||
if _, exists := newExposedPorts[port]; !exists {
|
||||
newExposedPorts[port] = struct{}{}
|
||||
}
|
||||
|
||||
bslice, exists := newPortBindings[port]
|
||||
if !exists {
|
||||
bslice = []nat.PortBinding{}
|
||||
}
|
||||
newPortBindings[port] = append(bslice, portMapping.Binding)
|
||||
}
|
||||
|
||||
return &PublishedPorts{ExposedPorts: newExposedPorts, PortBindings: newPortBindings}, nil
|
||||
}
|
||||
|
||||
// MergePortSpecs merges published ports for a given node
|
||||
func MergePortSpecs(nodeToPortSpecMap map[string][]string, role, name string) ([]string, error) {
|
||||
|
||||
portSpecs := []string{}
|
||||
|
||||
// add portSpecs according to node role
|
||||
for _, group := range nodeRuleGroupsMap[role] {
|
||||
for _, v := range nodeToPortSpecMap[group] {
|
||||
exists := false
|
||||
for _, i := range portSpecs {
|
||||
if v == i {
|
||||
exists = true
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
portSpecs = append(portSpecs, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add portSpecs according to node name
|
||||
for _, v := range nodeToPortSpecMap[name] {
|
||||
exists := false
|
||||
for _, i := range portSpecs {
|
||||
if v == i {
|
||||
exists = true
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
portSpecs = append(portSpecs, v)
|
||||
}
|
||||
}
|
||||
|
||||
return portSpecs, nil
|
||||
}
|
340
cli/registry.go
Normal file
340
cli/registry.go
Normal file
@ -0,0 +1,340 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
const (
	// well-known name of the shared registry container
	defaultRegistryContainerName = "k3d-registry"

	// docker image used for the registry container
	defaultRegistryImage = "registry:2"

	// Default registry port, both for the external and the internal ports
	// Note well, that the internal port is never changed.
	defaultRegistryPort = 5000

	// path inside the node containers where the registries config is written
	// (see writeRegistriesConfigInContainer)
	defaultFullRegistriesPath = "/etc/rancher/k3s/registries.yaml"

	// mount point of the registry data volume inside the registry container
	defaultRegistryMountPath = "/var/lib/registry"

	// address users use for pulling from the Docker Hub
	defaultDockerHubAddress = "docker.io"

	// actual registry endpoint behind the Docker Hub address
	defaultDockerRegistryHubAddress = "registry-1.docker.io"
)

// default labels assigned to the registry container
var defaultRegistryContainerLabels = map[string]string{
	"app":       "k3d",
	"component": "registry",
}

// default labels assigned to the registry volume;
// the extra "managed" label marks volumes created by k3d itself
var defaultRegistryVolumeLabels = map[string]string{
	"app":       "k3d",
	"component": "registry",
	"managed":   "true",
}
|
||||
|
||||
// NOTE: structs copied from https://github.com/rancher/k3s/blob/master/pkg/agent/templates/registry.go
// for avoiding a dependencies nightmare...
// They mirror the registries.yaml schema consumed by k3s.

// Registry is registry settings configured
type Registry struct {
	// Mirrors are namespace to mirror mapping for all namespaces.
	Mirrors map[string]Mirror `toml:"mirrors" yaml:"mirrors"`

	// Configs are configs for each registry.
	// The key is the FDQN or IP of the registry.
	Configs map[string]interface{} `toml:"configs" yaml:"configs"`

	// Auths are registry endpoint to auth config mapping. The registry endpoint must
	// be a valid url with host specified.
	// DEPRECATED: Use Configs instead. Remove in containerd 1.4.
	Auths map[string]interface{} `toml:"auths" yaml:"auths"`
}

// Mirror contains the config related to the registry mirror
type Mirror struct {
	// Endpoints are endpoints for a namespace. CRI plugin will try the endpoints
	// one by one until a working one is found. The endpoint must be a valid url
	// with host specified.
	// The scheme, host and path from the endpoint URL will be used.
	Endpoints []string `toml:"endpoint" yaml:"endpoint"`
}
|
||||
|
||||
// getGlobalRegistriesConfFilename gets the global registries file that will be
// used in all the servers/workers: <home>/.k3d/registries.yaml.
func getGlobalRegistriesConfFilename() (string, error) {
	homeDir, err := homedir.Dir()
	if err != nil {
		// without a home directory we cannot locate the config file
		log.Error("Couldn't get user's home directory")
		return "", err
	}

	return path.Join(homeDir, ".k3d", "registries.yaml"), nil
}
|
||||
|
||||
// writeRegistriesConfigInContainer creates a valid registries configuration
// file (registries.yaml) and copies it into the container identified by ID.
// The config merges an optional user-supplied registries file with the mirror
// entries needed for the managed local registry.
func writeRegistriesConfigInContainer(spec *ClusterSpec, ID string) error {
	// internal address uses the fixed in-container port; external uses the published one
	registryInternalAddress := fmt.Sprintf("%s:%d", spec.RegistryName, defaultRegistryPort)
	registryExternalAddress := fmt.Sprintf("%s:%d", spec.RegistryName, spec.RegistryPort)

	privRegistries := &Registry{}

	// load the base registry file
	if len(spec.RegistriesFile) > 0 {
		log.Printf("Using registries definitions from %q...\n", spec.RegistriesFile)
		privRegistryFile, err := ioutil.ReadFile(spec.RegistriesFile)
		if err != nil {
			return err // the file must exist at this point
		}
		if err := yaml.Unmarshal(privRegistryFile, &privRegistries); err != nil {
			return err
		}
	}

	if spec.RegistryEnabled {
		if len(privRegistries.Mirrors) == 0 {
			privRegistries.Mirrors = map[string]Mirror{}
		}

		// then add the private registry: pulls addressed to the external
		// address are served by the in-network registry endpoint
		privRegistries.Mirrors[registryExternalAddress] = Mirror{
			Endpoints: []string{fmt.Sprintf("http://%s", registryInternalAddress)},
		}

		// with the cache, redirect all the PULLs to the Docker Hub to the local registry
		if spec.RegistryCacheEnabled {
			privRegistries.Mirrors[defaultDockerHubAddress] = Mirror{
				Endpoints: []string{fmt.Sprintf("http://%s", registryInternalAddress)},
			}
		}
	}

	// serialize the merged config and write it into the container at the k3s path
	d, err := yaml.Marshal(&privRegistries)
	if err != nil {
		return err
	}

	return copyToContainer(ID, defaultFullRegistriesPath, d)
}
|
||||
|
||||
// createRegistry creates a registry, or connect the k3d network to an existing one.
// Returns the ID of the (created or reused) registry container.
func createRegistry(spec ClusterSpec) (string, error) {
	netName := k3dNetworkName(spec.ClusterName)

	// first, check we have not already started a registry (for example, for a different k3d cluster)
	// all the k3d clusters should share the same private registry, so if we already have a registry just connect
	// it to the network of this cluster.
	cid, err := getRegistryContainer()
	if err != nil {
		return "", err
	}

	if cid != "" {
		// TODO: we should check given-registry-name == existing-registry-name
		log.Printf("Registry already present: ensuring that it's running and connecting it to the '%s' network...\n", netName)
		// best-effort start: a failure is only a warning, connecting still proceeds
		if err := startContainer(cid); err != nil {
			log.Warnf("Failed to start registry container. Try starting it manually via `docker start %s`", cid)
		}
		if err := connectRegistryToNetwork(cid, netName, []string{spec.RegistryName}); err != nil {
			return "", err
		}
		return cid, nil
	}

	log.Printf("Creating Registry as %s:%d...\n", spec.RegistryName, spec.RegistryPort)

	containerLabels := make(map[string]string)

	// add a standard list of labels to our registry
	for k, v := range defaultRegistryContainerLabels {
		containerLabels[k] = v
	}
	containerLabels["created"] = time.Now().Format("2006-01-02 15:04:05")
	containerLabels["hostname"] = spec.RegistryName

	// publish the internal registry port on the requested host port
	registryPortSpec := fmt.Sprintf("0.0.0.0:%d:%d/tcp", spec.RegistryPort, defaultRegistryPort)
	registryPublishedPorts, err := CreatePublishedPorts([]string{registryPortSpec})
	if err != nil {
		// NOTE(review): log.Fatalf terminates the whole process here instead of
		// returning an error like the rest of this function — confirm intended
		log.Fatalf("Error: failed to parse port specs %+v \n%+v", registryPortSpec, err)
	}

	hostConfig := &container.HostConfig{
		PortBindings: registryPublishedPorts.PortBindings,
		Privileged:   true,
		Init:         &[]bool{true}[0],
	}

	if spec.AutoRestart {
		hostConfig.RestartPolicy.Name = "unless-stopped"
	}

	spec.Volumes = &Volumes{} // we do not need in the registry any of the volumes used by the other containers
	if spec.RegistryVolume != "" {
		// reuse an existing volume if one with this name exists, otherwise create it
		vol, err := getVolume(spec.RegistryVolume, map[string]string{})
		if err != nil {
			return "", fmt.Errorf(" Couldn't check if volume %s exists: %w", spec.RegistryVolume, err)
		}
		if vol != nil {
			log.Printf("Using existing volume %s for the Registry\n", spec.RegistryVolume)
		} else {
			log.Printf("Creating Registry volume %s...\n", spec.RegistryVolume)

			// assign some labels (so we can recognize the volume later on)
			volLabels := map[string]string{
				"registry-name": spec.RegistryName,
				"registry-port": strconv.Itoa(spec.RegistryPort),
			}
			for k, v := range defaultRegistryVolumeLabels {
				volLabels[k] = v
			}
			_, err := createVolume(spec.RegistryVolume, volLabels)
			if err != nil {
				return "", fmt.Errorf(" Couldn't create volume %s for registry: %w", spec.RegistryVolume, err)
			}
		}
		// mount the volume at the registry's data directory
		mount := fmt.Sprintf("%s:%s", spec.RegistryVolume, defaultRegistryMountPath)
		hostConfig.Binds = []string{mount}
	}

	// connect the registry to this k3d network, aliased under its hostname
	networkingConfig := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			netName: {
				Aliases: []string{spec.RegistryName},
			},
		},
	}

	config := &container.Config{
		Hostname:     spec.RegistryName,
		Image:        defaultRegistryImage,
		ExposedPorts: registryPublishedPorts.ExposedPorts,
		Labels:       containerLabels,
	}

	// we can enable the cache in the Registry by just adding a new env variable
	// (see https://docs.docker.com/registry/configuration/#override-specific-configuration-options)
	if spec.RegistryCacheEnabled {
		log.Printf("Activating pull-through cache to Docker Hub\n")
		cacheConfigKey := "REGISTRY_PROXY_REMOTEURL"
		cacheConfigValues := fmt.Sprintf("https://%s", defaultDockerRegistryHubAddress)
		config.Env = []string{fmt.Sprintf("%s=%s", cacheConfigKey, cacheConfigValues)}
	}

	id, err := createContainer(config, hostConfig, networkingConfig, defaultRegistryContainerName)
	if err != nil {
		return "", fmt.Errorf(" Couldn't create registry container %s\n%w", defaultRegistryContainerName, err)
	}

	if err := startContainer(id); err != nil {
		return "", fmt.Errorf(" Couldn't start container %s\n%w", defaultRegistryContainerName, err)
	}

	return id, nil
}
|
||||
|
||||
// getRegistryContainer looks for the registry container
|
||||
func getRegistryContainer() (string, error) {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
cFilter := filters.NewArgs()
|
||||
cFilter.Add("name", defaultRegistryContainerName)
|
||||
// filter with the standard list of labels of our registry
|
||||
for k, v := range defaultRegistryContainerLabels {
|
||||
cFilter.Add("label", fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
|
||||
containers, err := docker.ContainerList(ctx, types.ContainerListOptions{Filters: cFilter, All: true})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't list containers: %w", err)
|
||||
}
|
||||
if len(containers) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return containers[0].ID, nil
|
||||
}
|
||||
|
||||
// connectRegistryToNetwork connects the registry container to a given network
|
||||
func connectRegistryToNetwork(ID string, networkID string, aliases []string) error {
|
||||
if err := connectContainerToNetwork(ID, networkID, aliases); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// disconnectRegistryFromNetwork disconnects the Registry from a Network.
// If the Registry container is not connected to any more networks afterwards,
// it is removed, and its managed data volume is deleted as well unless
// keepRegistryVolume is set.
func disconnectRegistryFromNetwork(name string, keepRegistryVolume bool) error {
	// disconnect the registry from this cluster's network
	netName := k3dNetworkName(name)
	cid, err := getRegistryContainer()
	if err != nil {
		return err
	}
	if cid == "" {
		// no registry container exists: nothing to disconnect
		return nil
	}

	log.Printf("...Disconnecting Registry from the %s network\n", netName)
	if err := disconnectContainerFromNetwork(cid, netName); err != nil {
		return err
	}

	// check if the registry is not connected to any other networks.
	// in that case, we can safely stop the registry container
	networks, err := getContainerNetworks(cid)
	if err != nil {
		return err
	}
	if len(networks) == 0 {
		log.Printf("...Removing the Registry\n")
		// look up the data volume before the container (and its mount info) is gone
		volName, err := getVolumeMountedIn(cid, defaultRegistryMountPath)
		if err != nil {
			log.Printf("...warning: could not detect registry volume\n")
		}

		// best-effort removal: a failure is logged, cleanup continues
		if err := removeContainer(cid); err != nil {
			log.Println(err)
		}

		// check if the volume mounted in /var/lib/registry was managed by us. In that case (and only if
		// the user does not want to keep the volume alive), delete the registry volume
		if volName != "" {
			vol, err := getVolume(volName, defaultRegistryVolumeLabels)
			if err != nil {
				return fmt.Errorf(" Couldn't remove volume for registry %s\n%w", defaultRegistryContainerName, err)
			}
			if vol != nil {
				if keepRegistryVolume {
					log.Printf("...(keeping the Registry volume %s)\n", volName)
				} else {
					log.Printf("...Removing the Registry volume %s\n", volName)
					if err := deleteVolume(volName); err != nil {
						return fmt.Errorf(" Couldn't remove volume for registry %s\n%w", defaultRegistryContainerName, err)
					}
				}
			}
		}
	}

	return nil
}
|
100
cli/shell.go
Normal file
100
cli/shell.go
Normal file
@ -0,0 +1,100 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
)
|
||||
|
||||
// shell describes a supported interactive shell: its binary name, the startup
// flags that suppress user rc files, and the environment variable that holds
// its prompt.
type shell struct {
	Name    string            // shell binary name, e.g. "bash"
	Options []string          // startup flags that skip profile/rc files
	Prompt  string            // env var overriding the prompt (PS1 / PROMPT)
	Env     map[string]string // extra env vars — not read by subShell in this file
}

// shells maps a supported shell name to its launch configuration.
var shells = map[string]shell{
	"bash": {
		Name: "bash",
		Options: []string{
			"--noprofile", // don't load .profile/.bash_profile
			"--norc",      // don't load .bashrc
		},
		Prompt: "PS1",
	},
	"zsh": {
		Name: "zsh",
		Options: []string{
			"--no-rcs", // don't load .zshrc
		},
		Prompt: "PROMPT",
	},
}
|
||||
|
||||
// subShell
|
||||
func subShell(cluster, shell, command string) error {
|
||||
|
||||
// check if the selected shell is supported
|
||||
if shell == "auto" {
|
||||
shell = path.Base(os.Getenv("SHELL"))
|
||||
}
|
||||
|
||||
supported := false
|
||||
for supportedShell := range shells {
|
||||
if supportedShell == shell {
|
||||
supported = true
|
||||
}
|
||||
}
|
||||
if !supported {
|
||||
return fmt.Errorf("selected shell [%s] is not supported", shell)
|
||||
}
|
||||
|
||||
// get kubeconfig for selected cluster
|
||||
kubeConfigPath, err := getKubeConfig(cluster, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if we're already in a subshell
|
||||
subShell := os.ExpandEnv("$__K3D_CLUSTER__")
|
||||
if len(subShell) > 0 {
|
||||
return fmt.Errorf("Error: Already in subshell of cluster %s", subShell)
|
||||
}
|
||||
|
||||
// get path of shell executable
|
||||
shellPath, err := exec.LookPath(shell)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// set shell specific options (command line flags)
|
||||
shellOptions := shells[shell].Options
|
||||
|
||||
cmd := exec.Command(shellPath, shellOptions...)
|
||||
|
||||
if len(command) > 0 {
|
||||
cmd.Args = append(cmd.Args, "-c", command)
|
||||
|
||||
}
|
||||
|
||||
// Set up stdio
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
// Set up Promot
|
||||
setPrompt := fmt.Sprintf("%s=[%s} %s", shells[shell].Prompt, cluster, os.Getenv(shells[shell].Prompt))
|
||||
|
||||
// Set up KUBECONFIG
|
||||
setKube := fmt.Sprintf("KUBECONFIG=%s", kubeConfigPath)
|
||||
|
||||
// Declare subshell
|
||||
subShell = fmt.Sprintf("__K3D_CLUSTER__=%s", cluster)
|
||||
|
||||
newEnv := append(os.Environ(), setPrompt, setKube, subShell)
|
||||
|
||||
cmd.Env = newEnv
|
||||
|
||||
return cmd.Run()
|
||||
}
|
61
cli/types.go
Normal file
61
cli/types.go
Normal file
@ -0,0 +1,61 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// Globally used constants
const (
	DefaultRegistry    = "docker.io" // registry used when an image has no explicit registry prefix
	DefaultServerCount = 1           // number of server nodes created by default
)

// defaultNodes describes the type of nodes on which a port should be exposed by default
const defaultNodes = "server"

// defaultLabelNodes describes the type of nodes on which a label should be applied by default
const defaultLabelNodes = "all"

// mapping a node role to groups that should be applied to it
// (used e.g. by NewVolumes to resolve a node selector to a group)
var nodeRuleGroupsMap = map[string][]string{
	"worker": {"all", "workers", "agents"},
	"server": {"all", "server", "master"},
}
|
||||
|
||||
// Cluster describes an existing cluster
type Cluster struct {
	name        string            // cluster name
	image       string            // k3s image the cluster's nodes run
	status      string            // cluster status string
	serverPorts []string          // ports exposed by the server node
	server      types.Container   // docker container running the k3s server
	workers     []types.Container // docker containers running the k3s workers
}
|
||||
|
||||
// ClusterSpec defines the specs for a cluster that's up for creation
type ClusterSpec struct {
	AgentArgs            []string            // extra args for the k3s agent(s)
	APIPort              apiPort             // host/IP/port where the API server is exposed
	AutoRestart          bool                // restart node containers automatically
	ClusterName          string              // name of the cluster to create
	Env                  []string            // environment variables for the node containers
	NodeToLabelSpecMap   map[string][]string // node selector -> label specs
	Image                string              // k3s image used for the node containers
	NodeToPortSpecMap    map[string][]string // node selector -> port specs
	PortAutoOffset       int                 // offset applied when auto-assigning ports
	RegistriesFile       string              // path to an extra registries file
	RegistryEnabled      bool                // whether to run a local registry
	RegistryCacheEnabled bool                // whether the registry caches pulls — TODO confirm semantics
	RegistryName         string              // name of the registry container
	RegistryPort         int                 // host port of the registry
	RegistryVolume       string              // volume backing the registry
	ServerArgs           []string            // extra args for the k3s server
	Volumes              *Volumes            // volume mounts (see NewVolumes)
}
|
||||
|
||||
// PublishedPorts is a struct used for exposing container ports on the host system
type PublishedPorts struct {
	ExposedPorts map[nat.Port]struct{}          // set of container ports to expose
	PortBindings map[nat.Port][]nat.PortBinding // host bindings per exposed port
}
|
161
cli/util.go
Normal file
161
cli/util.go
Normal file
@ -0,0 +1,161 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// apiPort describes where the Kubernetes API server is exposed:
// an optional host name, its resolved IP, and the port.
type apiPort struct {
	Host   string // host name from the port spec (empty if only a port was given)
	HostIP string // first IP address the host name resolved to
	Port   string // port number, kept as a string (validated in parseAPIPort)
}

// letterBytes is the alphabet GenerateRandomString draws from.
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

// src is the random source used by GenerateRandomString.
// NOTE(review): rand.Source is not safe for concurrent use — confirm callers are single-goroutine.
var src = rand.NewSource(time.Now().UnixNano())
|
||||
|
||||
// GenerateRandomString thanks to https://stackoverflow.com/a/31832326/6450189
// GenerateRandomString is used to generate a random string that is used as a cluster secret
func GenerateRandomString(n int) string {

	sb := strings.Builder{}
	sb.Grow(n)
	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			// all 63 bits consumed: draw a fresh random value
			cache, remain = src.Int63(), letterIdxMax
		}
		// take the low letterIdxBits bits as a candidate index; indices beyond
		// the alphabet are skipped to keep the letter distribution uniform
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			sb.WriteByte(letterBytes[idx])
			i--
		}
		cache >>= letterIdxBits
		remain--
	}

	return sb.String()
}
|
||||
|
||||
/*** Cluster Name Validation ***/
|
||||
const clusterNameMaxSize int = 35
|
||||
|
||||
// CheckClusterName ensures that a cluster name is also a valid host name according to RFC 1123.
|
||||
// We further restrict the length of the cluster name to maximum 'clusterNameMaxSize'
|
||||
// so that we can construct the host names based on the cluster name, and still stay
|
||||
// within the 64 characters limit.
|
||||
func CheckClusterName(name string) error {
|
||||
if err := ValidateHostname(name); err != nil {
|
||||
return fmt.Errorf("Invalid cluster name\n%+v", ValidateHostname(name))
|
||||
}
|
||||
if len(name) > clusterNameMaxSize {
|
||||
return fmt.Errorf("Cluster name is too long (%d > %d)", len(name), clusterNameMaxSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateHostname ensures that a cluster name is also a valid host name according to RFC 1123.
func ValidateHostname(name string) error {

	if len(name) == 0 {
		return fmt.Errorf("no name provided")
	}

	// a hostname may not begin or end with a dash
	if name[0] == '-' || name[len(name)-1] == '-' {
		return fmt.Errorf("Hostname [%s] must not start or end with - (dash)", name)
	}

	// every rune must be an ASCII letter, a digit, or a dash
	for _, r := range name {
		isDigit := r >= '0' && r <= '9'
		isLower := r >= 'a' && r <= 'z'
		isUpper := r >= 'A' && r <= 'Z'
		if !isDigit && !isLower && !isUpper && r != '-' {
			return fmt.Errorf("Hostname [%s] contains characters other than 'Aa-Zz', '0-9' or '-'", name)
		}
	}

	return nil
}
|
||||
|
||||
func parseAPIPort(portSpec string) (*apiPort, error) {
|
||||
var port *apiPort
|
||||
split := strings.Split(portSpec, ":")
|
||||
if len(split) > 2 {
|
||||
return nil, fmt.Errorf("api-port format error")
|
||||
}
|
||||
|
||||
if len(split) == 1 {
|
||||
port = &apiPort{Port: split[0]}
|
||||
} else {
|
||||
// Make sure 'host' can be resolved to an IP address
|
||||
addrs, err := net.LookupHost(split[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
port = &apiPort{Host: split[0], HostIP: addrs[0], Port: split[1]}
|
||||
}
|
||||
|
||||
// Verify 'port' is an integer and within port ranges
|
||||
p, err := strconv.Atoi(port.Port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p < 0 || p > 65535 {
|
||||
return nil, fmt.Errorf("--api-port port value out of range")
|
||||
}
|
||||
|
||||
return port, nil
|
||||
}
|
||||
|
||||
func fileExists(filename string) bool {
|
||||
_, err := os.Stat(filename)
|
||||
return !os.IsNotExist(err)
|
||||
}
|
||||
|
||||
// dnsNameCheck carries the result channels of an asynchronous DNS lookup
// (started by newAsyncNameExists) plus the maximum time Exists() will wait.
type dnsNameCheck struct {
	res     chan bool     // receives whether the name resolved to >= 1 address
	err     chan error    // receives the lookup error, if any
	timeout time.Duration // maximum wait applied by Exists()
}
|
||||
|
||||
func newAsyncNameExists(name string, timeout time.Duration) *dnsNameCheck {
|
||||
d := &dnsNameCheck{
|
||||
res: make(chan bool),
|
||||
err: make(chan error),
|
||||
timeout: timeout,
|
||||
}
|
||||
go func() {
|
||||
addresses, err := net.LookupHost(name)
|
||||
if err != nil {
|
||||
d.err <- err
|
||||
}
|
||||
d.res <- len(addresses) > 0
|
||||
}()
|
||||
return d
|
||||
}
|
||||
|
||||
// Exists waits for the result of the asynchronous lookup started by
// newAsyncNameExists. It returns (true, nil) if the name resolved to at least
// one address, (false, err) if the lookup failed, and (false, nil) if the
// configured timeout elapsed before the lookup finished.
func (d dnsNameCheck) Exists() (bool, error) {
	select {
	case r := <-d.res:
		return r, nil
	case e := <-d.err:
		return false, e
	case <-time.After(d.timeout):
		// timed out: treat as "does not exist" without reporting an error
		return false, nil
	}
}
|
234
cli/volume.go
Normal file
234
cli/volume.go
Normal file
@ -0,0 +1,234 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// Volumes groups a cluster's volume mounts by scope:
// applied to every node, to a single named node, or to a node group.
type Volumes struct {
	DefaultVolumes       []string            // volumes mounted into every node
	NodeSpecificVolumes  map[string][]string // node name -> volumes for that node only
	GroupSpecificVolumes map[string][]string // group name (see nodeRuleGroupsMap) -> volumes
}
|
||||
|
||||
// createVolume will create a new docker volume
|
||||
func createVolume(volName string, volLabels map[string]string) (types.Volume, error) {
|
||||
var vol types.Volume
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
volumeCreateOptions := volume.VolumeCreateBody{
|
||||
Name: volName,
|
||||
Labels: volLabels,
|
||||
Driver: "local", //TODO: allow setting driver + opts
|
||||
DriverOpts: map[string]string{},
|
||||
}
|
||||
vol, err = docker.VolumeCreate(ctx, volumeCreateOptions)
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf("failed to create image volume [%s]\n%+v", volName, err)
|
||||
}
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
// deleteVolume will delete a volume
|
||||
func deleteVolume(volName string) error {
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
if err = docker.VolumeRemove(ctx, volName, true); err != nil {
|
||||
return fmt.Errorf(" Couldn't remove volume [%s]\n%+v", volName, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getVolume checks if a docker volume exists. The volume can be specified with a name and/or some labels.
|
||||
func getVolume(volName string, volLabels map[string]string) (*types.Volume, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(" Couldn't create docker client: %w", err)
|
||||
}
|
||||
|
||||
vFilter := filters.NewArgs()
|
||||
if volName != "" {
|
||||
vFilter.Add("name", volName)
|
||||
}
|
||||
for k, v := range volLabels {
|
||||
vFilter.Add("label", fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
|
||||
volumes, err := docker.VolumeList(ctx, vFilter)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(" Couldn't list volumes: %w", err)
|
||||
}
|
||||
if len(volumes.Volumes) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return volumes.Volumes[0], nil
|
||||
}
|
||||
|
||||
// getVolumeMountedIn gets the volume that is mounted in some container in some path
|
||||
func getVolumeMountedIn(ID string, path string) (string, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't create docker client: %w", err)
|
||||
}
|
||||
|
||||
c, err := docker.ContainerInspect(ctx, ID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(" Couldn't inspect container %s: %w", ID, err)
|
||||
}
|
||||
for _, mount := range c.Mounts {
|
||||
if mount.Destination == path {
|
||||
return mount.Name, nil
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// createImageVolume will create a new docker volume used for storing image tarballs that can be loaded into the clusters
|
||||
func createImageVolume(clusterName string) (types.Volume, error) {
|
||||
volName := fmt.Sprintf("k3d-%s-images", clusterName)
|
||||
volLabels := map[string]string{
|
||||
"app": "k3d",
|
||||
"cluster": clusterName,
|
||||
}
|
||||
return createVolume(volName, volLabels)
|
||||
}
|
||||
|
||||
// deleteImageVolume will delete the volume we created for sharing images with this cluster
|
||||
func deleteImageVolume(clusterName string) error {
|
||||
volName := fmt.Sprintf("k3d-%s-images", clusterName)
|
||||
return deleteVolume(volName)
|
||||
}
|
||||
|
||||
// getImageVolume returns the docker volume object representing the imagevolume for the cluster
|
||||
func getImageVolume(clusterName string) (types.Volume, error) {
|
||||
var vol types.Volume
|
||||
volName := fmt.Sprintf("k3d-%s-images", clusterName)
|
||||
|
||||
ctx := context.Background()
|
||||
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf(" Couldn't create docker client\n%+v", err)
|
||||
}
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "app=k3d")
|
||||
filters.Add("label", fmt.Sprintf("cluster=%s", clusterName))
|
||||
volumeList, err := docker.VolumeList(ctx, filters)
|
||||
if err != nil {
|
||||
return vol, fmt.Errorf(" Couldn't get volumes for cluster [%s]\n%+v ", clusterName, err)
|
||||
}
|
||||
volFound := false
|
||||
for _, volume := range volumeList.Volumes {
|
||||
if volume.Name == volName {
|
||||
vol = *volume
|
||||
volFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !volFound {
|
||||
return vol, fmt.Errorf("didn't find volume [%s] in list of volumes returned for cluster [%s]", volName, clusterName)
|
||||
}
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
// NewVolumes parses a list of volume specs (optionally suffixed with
// "@NODE" selectors) into a Volumes struct, sorting each spec into
// default, group-specific or node-specific volumes.
func NewVolumes(volumes []string) (*Volumes, error) {
	volumesSpec := &Volumes{
		DefaultVolumes:       []string{},
		NodeSpecificVolumes:  make(map[string][]string),
		GroupSpecificVolumes: make(map[string][]string),
	}

	// labeled so the nested group search below can continue the outer loop
volumes:
	for _, volume := range volumes {
		if strings.Contains(volume, "@") {
			// "spec@node" form: split into the volume spec and the node selector
			split := strings.Split(volume, "@")
			if len(split) != 2 {
				return nil, fmt.Errorf("invalid node volume spec: %s", volume)
			}

			nodeVolumes := split[0]
			node := strings.ToLower(split[1])
			if len(node) == 0 {
				return nil, fmt.Errorf("invalid node volume spec: %s", volume)
			}

			// check if node selector is a node group
			for group, names := range nodeRuleGroupsMap {
				added := false

				for _, name := range names {
					if name == node {
						volumesSpec.addGroupSpecificVolume(group, nodeVolumes)
						added = true
						break
					}
				}

				if added {
					continue volumes
				}
			}

			// otherwise this is a volume for a specific node
			volumesSpec.addNodeSpecificVolume(node, nodeVolumes)
		} else {
			// no "@" selector: applies to all nodes
			volumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, volume)
		}
	}

	return volumesSpec, nil
}
|
||||
|
||||
// addVolumesToHostConfig adds all default volumes and node / group specific volumes to a HostConfig
|
||||
func (v Volumes) addVolumesToHostConfig(containerName string, groupName string, hostConfig *container.HostConfig) {
|
||||
volumes := v.DefaultVolumes
|
||||
|
||||
if v, ok := v.NodeSpecificVolumes[containerName]; ok {
|
||||
volumes = append(volumes, v...)
|
||||
}
|
||||
|
||||
if v, ok := v.GroupSpecificVolumes[groupName]; ok {
|
||||
volumes = append(volumes, v...)
|
||||
}
|
||||
|
||||
if len(volumes) > 0 {
|
||||
hostConfig.Binds = volumes
|
||||
}
|
||||
}
|
||||
|
||||
func (v *Volumes) addNodeSpecificVolume(node, volume string) {
|
||||
if _, ok := v.NodeSpecificVolumes[node]; !ok {
|
||||
v.NodeSpecificVolumes[node] = []string{}
|
||||
}
|
||||
v.NodeSpecificVolumes[node] = append(v.NodeSpecificVolumes[node], volume)
|
||||
}
|
||||
|
||||
func (v *Volumes) addGroupSpecificVolume(group, volume string) {
|
||||
if _, ok := v.GroupSpecificVolumes[group]; !ok {
|
||||
v.GroupSpecificVolumes[group] = []string{}
|
||||
}
|
||||
v.GroupSpecificVolumes[group] = append(v.GroupSpecificVolumes[group], volume)
|
||||
}
|
@ -1,58 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdCluster returns a new cobra command
|
||||
func NewCmdCluster() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: "Manage cluster(s)",
|
||||
Long: `Manage cluster(s)`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
cmd.AddCommand(NewCmdClusterCreate(),
|
||||
NewCmdClusterStart(),
|
||||
NewCmdClusterStop(),
|
||||
NewCmdClusterDelete(),
|
||||
NewCmdClusterList(),
|
||||
NewCmdClusterEdit())
|
||||
|
||||
// add flags
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,599 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
cliutil "github.com/rancher/k3d/v5/cmd/util"
|
||||
cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
|
||||
k3dCluster "github.com/rancher/k3d/v5/pkg/client"
|
||||
"github.com/rancher/k3d/v5/pkg/config"
|
||||
conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/rancher/k3d/v5/version"
|
||||
)
|
||||
|
||||
// configFile is the config file path given via --config (bound in NewCmdClusterCreate).
var configFile string

const clusterCreateDescription = `
Create a new k3s cluster with containerized nodes (k3s in docker).
Every cluster will consist of one or more containers:
	- 1 (or more) server node container (k3s)
	- (optionally) 1 loadbalancer container as the entrypoint to the cluster (nginx)
	- (optionally) 1 (or more) agent node containers (k3s)
`

/*
 * Viper for configuration handling
 * we use two different instances of Viper here to handle
 * - cfgViper: "static" configuration
 * - ppViper: "pre-processed" configuration, where CLI input has to be pre-processed
 *   to be treated as part of the SimpleConfig
 */
var (
	cfgViper = viper.New()
	ppViper  = viper.New()
)
|
||||
|
||||
func initConfig() error {
|
||||
|
||||
// Viper for pre-processed config options
|
||||
ppViper.SetEnvPrefix("K3D")
|
||||
|
||||
if l.Log().GetLevel() >= logrus.DebugLevel {
|
||||
|
||||
c, _ := yaml.Marshal(ppViper.AllSettings())
|
||||
l.Log().Debugf("Additional CLI Configuration:\n%s", c)
|
||||
}
|
||||
|
||||
return cliconfig.InitViperWithConfigFile(cfgViper, configFile)
|
||||
}
|
||||
|
||||
// NewCmdClusterCreate returns a new cobra command that creates a cluster:
// it merges config-file, environment and CLI input, transforms/validates the
// resulting cluster config, creates the cluster (rolling back on failure),
// optionally updates the default kubeconfig, and prints usage hints.
func NewCmdClusterCreate() *cobra.Command {

	// create new command
	cmd := &cobra.Command{
		Use:     "create NAME",
		Short:   "Create a new cluster",
		Long:    clusterCreateDescription,
		Args:    cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return initConfig()
		},
		Run: func(cmd *cobra.Command, args []string) {

			/*************************
			 * Compute Configuration *
			 *************************/
			// fill in apiversion/kind defaults so config.FromViper can dispatch
			if cfgViper.GetString("apiversion") == "" {
				cfgViper.Set("apiversion", config.DefaultConfigApiVersion)
			}
			if cfgViper.GetString("kind") == "" {
				cfgViper.Set("kind", "Simple")
			}
			cfg, err := config.FromViper(cfgViper)
			if err != nil {
				l.Log().Fatalln(err)
			}

			// migrate older config apiVersions to the current one
			if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
				l.Log().Warnf("Default config apiVersion is '%s', but you're using '%s': consider migrating.", config.DefaultConfigApiVersion, cfg.GetAPIVersion())
				cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
				if err != nil {
					l.Log().Fatalln(err)
				}
			}

			simpleCfg := cfg.(conf.SimpleConfig)

			l.Log().Debugf("========== Simple Config ==========\n%+v\n==========================\n", simpleCfg)

			// CLI flags win over config-file values
			simpleCfg, err = applyCLIOverrides(simpleCfg)
			if err != nil {
				l.Log().Fatalf("Failed to apply CLI overrides: %+v", err)
			}

			l.Log().Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", simpleCfg)

			/**************************************
			 * Transform, Process & Validate Configuration *
			 **************************************/

			// Set the name
			if len(args) != 0 {
				simpleCfg.Name = args[0]
			}

			clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, simpleCfg)
			if err != nil {
				l.Log().Fatalln(err)
			}
			l.Log().Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)

			clusterConfig, err = config.ProcessClusterConfig(*clusterConfig)
			if err != nil {
				l.Log().Fatalln(err)
			}
			l.Log().Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)

			if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
				l.Log().Fatalln("Failed Cluster Configuration Validation: ", err)
			}

			/**************************************
			 * Create cluster if it doesn't exist *
			 **************************************/

			// check if a cluster with that name exists already
			if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster); err == nil {
				l.Log().Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
			}

			// create cluster
			if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
				l.Log().Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
				clusterConfig.ClusterCreateOpts.WaitForServer = true
			}
			//if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
			if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil {
				// rollback if creation failed
				l.Log().Errorln(err)
				if simpleCfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
					l.Log().Fatalln("Cluster creation FAILED, rollback deactivated.")
				}
				// rollback if creation failed
				l.Log().Errorln("Failed to create cluster >>> Rolling Back")
				if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, k3d.ClusterDeleteOpts{SkipRegistryCheck: true}); err != nil {
					l.Log().Errorln(err)
					l.Log().Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
				}
				l.Log().Fatalln("Cluster creation FAILED, all changes have been rolled back!")
			}
			l.Log().Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)

			/**************
			 * Kubeconfig *
			 **************/

			if !clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
				l.Log().Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
				clusterConfig.KubeconfigOpts.SwitchCurrentContext = false
			}

			if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
				l.Log().Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
				if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: simpleCfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
					l.Log().Warningln(err)
				}
			}

			/*****************
			 * User Feedback *
			 *****************/

			// print information on how to use the cluster with kubectl
			l.Log().Infoln("You can now use it like this:")
			if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
				fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, clusterConfig.Cluster.Name))
			} else if !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
				if runtime.GOOS == "windows" {
					fmt.Printf("$env:KUBECONFIG=(%s kubeconfig write %s)\n", os.Args[0], clusterConfig.Cluster.Name)
				} else {
					fmt.Printf("export KUBECONFIG=$(%s kubeconfig write %s)\n", os.Args[0], clusterConfig.Cluster.Name)
				}
			}
			fmt.Println("kubectl cluster-info")
		},
	}

	/***************
	 * Config File *
	 ***************/

	cmd.Flags().StringVarP(&configFile, "config", "c", "", "Path of a config file to use")
	if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
		l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
	}

	/***********************
	 * Pre-Processed Flags *
	 ***********************
	 *
	 * Flags that have a different style in the CLI than their internal representation.
	 * Also, we cannot set (viper) default values just here for those.
	 * Example:
	 *   CLI: `--api-port 0.0.0.0:6443`
	 *   Config File:
	 *     exposeAPI:
	 *       hostIP: 0.0.0.0
	 *       port: 6443
	 *
	 * Note: here we also use Slice-type flags instead of Array because of https://github.com/spf13/viper/issues/380
	 */

	cmd.Flags().String("api-port", "", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
	_ = ppViper.BindPFlag("cli.api-port", cmd.Flags().Lookup("api-port"))

	cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com@server:0\" -e \"SOME_KEY=SOME_VAL@server:0\"`")
	_ = ppViper.BindPFlag("cli.env", cmd.Flags().Lookup("env"))

	cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent:0,1 -v /tmp/test:/tmp/other@server:0`")
	_ = ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume"))

	cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent:0 -p 8081@agent:1`")
	_ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port"))

	cmd.Flags().StringArrayP("k3s-node-label", "", nil, "Add label to k3s node (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --k3s-node-label \"my.label@agent:0,1\" --k3s-node-label \"other.label=somevalue@server:0\"`")
	_ = ppViper.BindPFlag("cli.k3s-node-labels", cmd.Flags().Lookup("k3s-node-label"))

	cmd.Flags().StringArrayP("runtime-label", "", nil, "Add label to container runtime (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 --runtime-label \"my.label@agent:0,1\" --runtime-label \"other.label=somevalue@server:0\"`")
	_ = ppViper.BindPFlag("cli.runtime-labels", cmd.Flags().Lookup("runtime-label"))

	cmd.Flags().String("registry-create", "", "Create a k3d-managed registry and connect it to the cluster (Format: `NAME[:HOST][:HOSTPORT]`\n - Example: `k3d cluster create --registry-create mycluster-registry:0.0.0.0:5432`")
	_ = ppViper.BindPFlag("cli.registries.create", cmd.Flags().Lookup("registry-create"))

	/* k3s */
	cmd.Flags().StringArray("k3s-arg", nil, "Additional args passed to k3s command (Format: `ARG@NODEFILTER[;@NODEFILTER]`)\n - Example: `k3d cluster create --k3s-arg \"--disable=traefik@server:0\"")
	_ = ppViper.BindPFlag("cli.k3sargs", cmd.Flags().Lookup("k3s-arg"))

	/******************
	 * "Normal" Flags *
	 ******************
	 *
	 * No pre-processing needed on CLI level.
	 * Bound to Viper config value.
	 * Default Values set via Viper.
	 */

	cmd.Flags().IntP("servers", "s", 0, "Specify how many servers you want to create")
	_ = cfgViper.BindPFlag("servers", cmd.Flags().Lookup("servers"))
	cfgViper.SetDefault("servers", 1)

	cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
	_ = cfgViper.BindPFlag("agents", cmd.Flags().Lookup("agents"))
	cfgViper.SetDefault("agents", 0)

	cmd.Flags().StringP("image", "i", "", "Specify k3s image that you want to use for the nodes")
	_ = cfgViper.BindPFlag("image", cmd.Flags().Lookup("image"))
	cfgViper.SetDefault("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)))

	cmd.Flags().String("network", "", "Join an existing network")
	_ = cfgViper.BindPFlag("network", cmd.Flags().Lookup("network"))

	cmd.Flags().String("subnet", "", "[Experimental: IPAM] Define a subnet for the newly created container network (Example: `172.28.0.0/16`)")
	_ = cfgViper.BindPFlag("subnet", cmd.Flags().Lookup("subnet"))

	cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
	_ = cfgViper.BindPFlag("token", cmd.Flags().Lookup("token"))

	cmd.Flags().Bool("wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
	_ = cfgViper.BindPFlag("options.k3d.wait", cmd.Flags().Lookup("wait"))

	cmd.Flags().Duration("timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
	_ = cfgViper.BindPFlag("options.k3d.timeout", cmd.Flags().Lookup("timeout"))

	cmd.Flags().Bool("kubeconfig-update-default", true, "Directly update the default kubeconfig with the new cluster's context")
	_ = cfgViper.BindPFlag("options.kubeconfig.updatedefaultkubeconfig", cmd.Flags().Lookup("kubeconfig-update-default"))

	cmd.Flags().Bool("kubeconfig-switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default)")
	_ = cfgViper.BindPFlag("options.kubeconfig.switchcurrentcontext", cmd.Flags().Lookup("kubeconfig-switch-context"))

	cmd.Flags().Bool("no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
	_ = cfgViper.BindPFlag("options.k3d.disableloadbalancer", cmd.Flags().Lookup("no-lb"))

	cmd.Flags().Bool("no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
	_ = cfgViper.BindPFlag("options.k3d.disablerollback", cmd.Flags().Lookup("no-rollback"))

	cmd.Flags().String("gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
	_ = cfgViper.BindPFlag("options.runtime.gpurequest", cmd.Flags().Lookup("gpus"))

	cmd.Flags().String("servers-memory", "", "Memory limit imposed on the server nodes [From docker]")
	_ = cfgViper.BindPFlag("options.runtime.serversmemory", cmd.Flags().Lookup("servers-memory"))

	cmd.Flags().String("agents-memory", "", "Memory limit imposed on the agents nodes [From docker]")
	_ = cfgViper.BindPFlag("options.runtime.agentsmemory", cmd.Flags().Lookup("agents-memory"))

	/* Image Importing */
	cmd.Flags().Bool("no-image-volume", false, "Disable the creation of a volume for importing images")
	_ = cfgViper.BindPFlag("options.k3d.disableimagevolume", cmd.Flags().Lookup("no-image-volume"))

	/* Registry */
	cmd.Flags().StringArray("registry-use", nil, "Connect to one or more k3d-managed registries running locally")
	_ = cfgViper.BindPFlag("registries.use", cmd.Flags().Lookup("registry-use"))

	cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file")
	_ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config"))
	if err := cmd.MarkFlagFilename("registry-config", "yaml", "yml"); err != nil {
		l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
	}

	/* Loadbalancer / Proxy */
	cmd.Flags().StringSlice("lb-config-override", nil, "Use dotted YAML path syntax to override nginx loadbalancer settings")
	_ = cfgViper.BindPFlag("options.k3d.loadbalancer.configoverrides", cmd.Flags().Lookup("lb-config-override"))

	/* Subcommands */

	// done
	return cmd
}
|
||||
|
||||
func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
|
||||
|
||||
/****************************
|
||||
* Parse and validate flags *
|
||||
****************************/
|
||||
|
||||
// -> API-PORT
|
||||
// parse the port mapping
|
||||
var (
|
||||
err error
|
||||
exposeAPI *k3d.ExposureOpts
|
||||
)
|
||||
|
||||
// Apply config file values as defaults
|
||||
exposeAPI = &k3d.ExposureOpts{
|
||||
PortMapping: nat.PortMapping{
|
||||
Binding: nat.PortBinding{
|
||||
HostIP: cfg.ExposeAPI.HostIP,
|
||||
HostPort: cfg.ExposeAPI.HostPort,
|
||||
},
|
||||
},
|
||||
Host: cfg.ExposeAPI.Host,
|
||||
}
|
||||
|
||||
// Overwrite if cli arg is set
|
||||
if ppViper.IsSet("cli.api-port") {
|
||||
if cfg.ExposeAPI.HostPort != "" {
|
||||
l.Log().Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
|
||||
}
|
||||
exposeAPI, err = cliutil.ParsePortExposureSpec(ppViper.GetString("cli.api-port"), k3d.DefaultAPIPort)
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("failed to parse API Port spec: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set to random port if port is empty string
|
||||
if len(exposeAPI.Binding.HostPort) == 0 {
|
||||
var freePort string
|
||||
port, err := cliutil.GetFreePort()
|
||||
freePort = strconv.Itoa(port)
|
||||
if err != nil || port == 0 {
|
||||
l.Log().Warnf("Failed to get random free port: %+v", err)
|
||||
l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", k3d.DefaultAPIPort)
|
||||
freePort = k3d.DefaultAPIPort
|
||||
}
|
||||
exposeAPI.Binding.HostPort = freePort
|
||||
}
|
||||
|
||||
cfg.ExposeAPI = conf.SimpleExposureOpts{
|
||||
Host: exposeAPI.Host,
|
||||
HostIP: exposeAPI.Binding.HostIP,
|
||||
HostPort: exposeAPI.Binding.HostPort,
|
||||
}
|
||||
|
||||
// -> VOLUMES
|
||||
// volumeFilterMap will map volume mounts to applied node filters
|
||||
volumeFilterMap := make(map[string][]string, 1)
|
||||
for _, volumeFlag := range ppViper.GetStringSlice("cli.volumes") {
|
||||
|
||||
// split node filter from the specified volume
|
||||
volume, filters, err := cliutil.SplitFiltersFromFlag(volumeFlag)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create != nil || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
|
||||
l.Log().Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
|
||||
}
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := volumeFilterMap[volume]; exists {
|
||||
volumeFilterMap[volume] = append(volumeFilterMap[volume], filters...)
|
||||
} else {
|
||||
volumeFilterMap[volume] = filters
|
||||
}
|
||||
}
|
||||
|
||||
for volume, nodeFilters := range volumeFilterMap {
|
||||
cfg.Volumes = append(cfg.Volumes, conf.VolumeWithNodeFilters{
|
||||
Volume: volume,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
l.Log().Tracef("VolumeFilterMap: %+v", volumeFilterMap)
|
||||
|
||||
// -> PORTS
|
||||
portFilterMap := make(map[string][]string, 1)
|
||||
for _, portFlag := range ppViper.GetStringSlice("cli.ports") {
|
||||
// split node filter from the specified volume
|
||||
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := portFilterMap[portmap]; exists {
|
||||
l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
|
||||
} else {
|
||||
portFilterMap[portmap] = filters
|
||||
}
|
||||
}
|
||||
|
||||
for port, nodeFilters := range portFilterMap {
|
||||
cfg.Ports = append(cfg.Ports, conf.PortWithNodeFilters{
|
||||
Port: port,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
l.Log().Tracef("PortFilterMap: %+v", portFilterMap)
|
||||
|
||||
// --k3s-node-label
|
||||
// k3sNodeLabelFilterMap will add k3s node label to applied node filters
|
||||
k3sNodeLabelFilterMap := make(map[string][]string, 1)
|
||||
for _, labelFlag := range ppViper.GetStringSlice("cli.k3s-node-labels") {
|
||||
|
||||
// split node filter from the specified label
|
||||
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := k3sNodeLabelFilterMap[label]; exists {
|
||||
k3sNodeLabelFilterMap[label] = append(k3sNodeLabelFilterMap[label], nodeFilters...)
|
||||
} else {
|
||||
k3sNodeLabelFilterMap[label] = nodeFilters
|
||||
}
|
||||
}
|
||||
|
||||
for label, nodeFilters := range k3sNodeLabelFilterMap {
|
||||
cfg.Options.K3sOptions.NodeLabels = append(cfg.Options.K3sOptions.NodeLabels, conf.LabelWithNodeFilters{
|
||||
Label: label,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
l.Log().Tracef("K3sNodeLabelFilterMap: %+v", k3sNodeLabelFilterMap)
|
||||
|
||||
// --runtime-label
|
||||
// runtimeLabelFilterMap will add container runtime label to applied node filters
|
||||
runtimeLabelFilterMap := make(map[string][]string, 1)
|
||||
for _, labelFlag := range ppViper.GetStringSlice("cli.runtime-labels") {
|
||||
|
||||
// split node filter from the specified label
|
||||
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
cliutil.ValidateRuntimeLabelKey(strings.Split(label, "=")[0])
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := runtimeLabelFilterMap[label]; exists {
|
||||
runtimeLabelFilterMap[label] = append(runtimeLabelFilterMap[label], nodeFilters...)
|
||||
} else {
|
||||
runtimeLabelFilterMap[label] = nodeFilters
|
||||
}
|
||||
}
|
||||
|
||||
for label, nodeFilters := range runtimeLabelFilterMap {
|
||||
cfg.Options.Runtime.Labels = append(cfg.Options.Runtime.Labels, conf.LabelWithNodeFilters{
|
||||
Label: label,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
l.Log().Tracef("RuntimeLabelFilterMap: %+v", runtimeLabelFilterMap)
|
||||
|
||||
// --env
|
||||
// envFilterMap will add container env vars to applied node filters
|
||||
envFilterMap := make(map[string][]string, 1)
|
||||
for _, envFlag := range ppViper.GetStringSlice("cli.env") {
|
||||
|
||||
// split node filter from the specified env var
|
||||
env, filters, err := cliutil.SplitFiltersFromFlag(envFlag)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := envFilterMap[env]; exists {
|
||||
envFilterMap[env] = append(envFilterMap[env], filters...)
|
||||
} else {
|
||||
envFilterMap[env] = filters
|
||||
}
|
||||
}
|
||||
|
||||
for envVar, nodeFilters := range envFilterMap {
|
||||
cfg.Env = append(cfg.Env, conf.EnvVarWithNodeFilters{
|
||||
EnvVar: envVar,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
l.Log().Tracef("EnvFilterMap: %+v", envFilterMap)
|
||||
|
||||
// --k3s-arg
|
||||
argFilterMap := make(map[string][]string, 1)
|
||||
for _, argFlag := range ppViper.GetStringSlice("cli.k3sargs") {
|
||||
|
||||
// split node filter from the specified arg
|
||||
arg, filters, err := cliutil.SplitFiltersFromFlag(argFlag)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// create new entry or append filter to existing entry
|
||||
if _, exists := argFilterMap[arg]; exists {
|
||||
argFilterMap[arg] = append(argFilterMap[arg], filters...)
|
||||
} else {
|
||||
argFilterMap[arg] = filters
|
||||
}
|
||||
}
|
||||
|
||||
for arg, nodeFilters := range argFilterMap {
|
||||
cfg.Options.K3sOptions.ExtraArgs = append(cfg.Options.K3sOptions.ExtraArgs, conf.K3sArgWithNodeFilters{
|
||||
Arg: arg,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
// --registry-create
|
||||
if ppViper.IsSet("cli.registries.create") {
|
||||
flagvalue := ppViper.GetString("cli.registries.create")
|
||||
fvSplit := strings.SplitN(flagvalue, ":", 2)
|
||||
if cfg.Registries.Create == nil {
|
||||
cfg.Registries.Create = &conf.SimpleConfigRegistryCreateConfig{}
|
||||
}
|
||||
cfg.Registries.Create.Name = fvSplit[0]
|
||||
if len(fvSplit) > 1 {
|
||||
exposeAPI, err = cliutil.ParsePortExposureSpec(fvSplit[1], "1234") // internal port is unused after all
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("failed to registry port spec: %w", err)
|
||||
}
|
||||
cfg.Registries.Create.Host = exposeAPI.Host
|
||||
cfg.Registries.Create.HostPort = exposeAPI.Binding.HostPort
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
@ -1,170 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
cliconfig "github.com/rancher/k3d/v5/cmd/util/config"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
k3dutil "github.com/rancher/k3d/v5/pkg/util"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// clusterDeleteConfigFile holds the path given via the --config/-c flag.
var clusterDeleteConfigFile string

// clusterDeleteCfgViper is the viper instance backing that config file;
// it is populated in the delete command's PreRunE.
var clusterDeleteCfgViper = viper.New()
|
||||
|
||||
// NewCmdClusterDelete returns a new cobra command that deletes one or more
// clusters (by name, via --all, or via a --config file) and cleans up their
// kubeconfig entries.
func NewCmdClusterDelete() *cobra.Command {

	// create new cobra command
	cmd := &cobra.Command{
		Use:               "delete [NAME [NAME ...] | --all]",
		Aliases:           []string{"del", "rm"},
		Short:             "Delete cluster(s).",
		Long:              `Delete cluster(s).`,
		Args:              cobra.MinimumNArgs(0), // 0 or n arguments; 0 = default cluster name
		ValidArgsFunction: util.ValidArgsAvailableClusters,
		// load the optional --config file into the command-local viper before running
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return cliconfig.InitViperWithConfigFile(clusterDeleteCfgViper, clusterDeleteConfigFile)
		},
		Run: func(cmd *cobra.Command, args []string) {
			// resolve args / --all / --config into the list of clusters to delete
			clusters := parseDeleteClusterCmd(cmd, args)

			if len(clusters) == 0 {
				l.Log().Infoln("No clusters found")
			} else {
				for _, c := range clusters {
					// delete the cluster itself; a failure here aborts the command
					if err := client.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c, k3d.ClusterDeleteOpts{SkipRegistryCheck: false}); err != nil {
						l.Log().Fatalln(err)
					}
					// kubeconfig cleanup is best-effort: failures only warn
					l.Log().Infoln("Removing cluster details from default kubeconfig...")
					if err := client.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
						l.Log().Warnln("Failed to remove cluster details from default kubeconfig")
						l.Log().Warnln(err)
					}
					l.Log().Infoln("Removing standalone kubeconfig file (if there is one)...")
					configDir, err := k3dutil.GetConfigDirOrCreate()
					if err != nil {
						l.Log().Warnf("Failed to delete kubeconfig file: %+v", err)
					} else {
						kubeconfigfile := path.Join(configDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
						if err := os.Remove(kubeconfigfile); err != nil {
							// a missing file is fine; only warn on real removal errors
							if !os.IsNotExist(err) {
								l.Log().Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
							}
						}
					}

					l.Log().Infof("Successfully deleted cluster %s!", c.Name)
				}
			}

		},
	}

	// add subcommands

	// add flags
	cmd.Flags().BoolP("all", "a", false, "Delete all existing clusters")

	/***************
	 * Config File *
	 ***************/

	cmd.Flags().StringVarP(&clusterDeleteConfigFile, "config", "c", "", "Path of a config file to use")
	if err := cmd.MarkFlagFilename("config", "yaml", "yml"); err != nil {
		l.Log().Fatalln("Failed to mark flag 'config' as filename flag")
	}

	// done
	return cmd
}
|
||||
|
||||
// parseDeleteClusterCmd parses the command input into variables required to delete clusters
|
||||
func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
|
||||
|
||||
var clusters []*k3d.Cluster
|
||||
|
||||
// --all
|
||||
all, err := cmd.Flags().GetBool("all")
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// --config
|
||||
if clusterDeleteConfigFile != "" {
|
||||
// not allowed with --all or more args
|
||||
if len(args) > 0 || all {
|
||||
l.Log().Fatalln("failed to delete cluster: cannot use `--config` flag with additional arguments or `--all`")
|
||||
}
|
||||
|
||||
if clusterDeleteCfgViper.GetString("name") == "" {
|
||||
l.Log().Fatalln("failed to delete cluster via config file: no name in config file")
|
||||
}
|
||||
|
||||
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterDeleteCfgViper.GetString("name")})
|
||||
if err != nil {
|
||||
l.Log().Fatalf("failed to delete cluster '%s': %v", clusterDeleteCfgViper.GetString("name"), err)
|
||||
}
|
||||
|
||||
clusters = append(clusters, c)
|
||||
return clusters
|
||||
}
|
||||
|
||||
// --all was set
|
||||
if all {
|
||||
l.Log().Infoln("Deleting all clusters...")
|
||||
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
return clusters
|
||||
}
|
||||
|
||||
// args only
|
||||
clusternames := []string{k3d.DefaultClusterName}
|
||||
if len(args) != 0 {
|
||||
clusternames = args
|
||||
}
|
||||
|
||||
for _, name := range clusternames {
|
||||
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
|
||||
if err != nil {
|
||||
if err == client.ClusterGetNoNodesFoundError {
|
||||
continue
|
||||
}
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
clusters = append(clusters, c)
|
||||
}
|
||||
|
||||
return clusters
|
||||
}
|
@ -1,124 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
cliutil "github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
conf "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdClusterEdit returns a new cobra command
|
||||
func NewCmdClusterEdit() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "edit CLUSTER",
|
||||
Short: "[EXPERIMENTAL] Edit cluster(s).",
|
||||
Long: `[EXPERIMENTAL] Edit cluster(s).`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
Aliases: []string{"update"},
|
||||
ValidArgsFunction: util.ValidArgsAvailableClusters,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
existingCluster, changeset := parseEditClusterCmd(cmd, args)
|
||||
|
||||
l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingCluster, changeset)
|
||||
|
||||
if err := client.ClusterEditChangesetSimple(cmd.Context(), runtimes.SelectedRuntime, existingCluster, changeset); err != nil {
|
||||
l.Log().Fatalf("Failed to update the cluster: %v", err)
|
||||
}
|
||||
|
||||
l.Log().Infof("Successfully updated %s", existingCluster.Name)
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
|
||||
// add flags
|
||||
cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] Map ports from the node containers (via the serverlb) to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseEditClusterCmd parses the command input into the existing cluster and
// the requested changeset (currently only port mappings added via --port-add).
// It returns (nil, nil) when the cluster is not found or the flag cannot be read.
func parseEditClusterCmd(cmd *cobra.Command, args []string) (*k3d.Cluster, *conf.SimpleConfig) {

	// look up the cluster named by the first (and only) positional argument
	existingCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: args[0]})
	if err != nil {
		l.Log().Fatalln(err)
	}

	if existingCluster == nil {
		l.Log().Infof("Cluster %s not found", args[0])
		return nil, nil
	}

	changeset := conf.SimpleConfig{}

	/*
	 * --port-add
	 */
	portFlags, err := cmd.Flags().GetStringArray("port-add")
	if err != nil {
		l.Log().Errorln(err)
		return nil, nil
	}

	// init portmap
	changeset.Ports = []conf.PortWithNodeFilters{}

	// portFilterMap maps each port mapping to its node filters
	portFilterMap := make(map[string][]string, 1)
	for _, portFlag := range portFlags {

		// split node filter from the specified port mapping
		portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
		if err != nil {
			l.Log().Fatalln(err)
		}

		// a given port mapping may only appear once across all nodes
		if _, exists := portFilterMap[portmap]; exists {
			l.Log().Fatalln("Same Portmapping can not be used for multiple nodes")
		} else {
			portFilterMap[portmap] = filters
		}
	}

	for port, nodeFilters := range portFilterMap {
		changeset.Ports = append(changeset.Ports, conf.PortWithNodeFilters{
			Port:        port,
			NodeFilters: nodeFilters,
		})
	}

	l.Log().Tracef("PortFilterMap: %+v", portFilterMap)

	return existingCluster, &changeset
}
|
@ -1,182 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
k3cluster "github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
)
|
||||
|
||||
// TODO : deal with --all flag to manage differentiate started cluster and stopped cluster like `docker ps` and `docker ps -a`
|
||||
// clusterFlags holds the output-related CLI flag values for the list command.
type clusterFlags struct {
	noHeader bool   // --no-headers: suppress the table header row
	token    bool   // --token: include the k3s cluster token in the output
	output   string // --output/-o: "json" or "yaml"; anything else = table output
}
|
||||
|
||||
// NewCmdClusterList returns a new cobra command
|
||||
func NewCmdClusterList() *cobra.Command {
|
||||
clusterFlags := clusterFlags{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "list [NAME [NAME...]]",
|
||||
Aliases: []string{"ls", "get"},
|
||||
Short: "List cluster(s)",
|
||||
Long: `List cluster(s).`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
clusters := buildClusterList(cmd.Context(), args)
|
||||
PrintClusters(clusters, clusterFlags)
|
||||
},
|
||||
ValidArgsFunction: util.ValidArgsAvailableClusters,
|
||||
}
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolVar(&clusterFlags.noHeader, "no-headers", false, "Disable headers")
|
||||
cmd.Flags().BoolVar(&clusterFlags.token, "token", false, "Print k3s cluster token")
|
||||
cmd.Flags().StringVarP(&clusterFlags.output, "output", "o", "", "Output format. One of: json|yaml")
|
||||
|
||||
// add subcommands
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {
|
||||
var clusters []*k3d.Cluster
|
||||
var err error
|
||||
|
||||
if len(args) == 0 {
|
||||
// cluster name not specified : get all clusters
|
||||
clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
for _, clusterName := range args {
|
||||
// cluster name specified : get specific cluster
|
||||
retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
clusters = append(clusters, retrievedCluster)
|
||||
}
|
||||
}
|
||||
|
||||
return clusters
|
||||
}
|
||||
|
||||
// PrintPrintClusters : display list of cluster
|
||||
func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
|
||||
// the output details printed when we dump JSON/YAML
|
||||
type jsonOutput struct {
|
||||
k3d.Cluster
|
||||
ServersRunning int `yaml:"servers_running" json:"serversRunning"`
|
||||
ServersCount int `yaml:"servers_count" json:"serversCount"`
|
||||
AgentsRunning int `yaml:"agents_running" json:"agentsRunning"`
|
||||
AgentsCount int `yaml:"agents_count" json:"agentsCount"`
|
||||
LoadBalancer bool `yaml:"has_lb,omitempty" json:"hasLoadbalancer,omitempty"`
|
||||
}
|
||||
|
||||
jsonOutputEntries := []jsonOutput{}
|
||||
|
||||
outputFormat := strings.ToLower(flags.output)
|
||||
|
||||
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
|
||||
defer tabwriter.Flush()
|
||||
|
||||
if outputFormat != "json" && outputFormat != "yaml" {
|
||||
if !flags.noHeader {
|
||||
headers := []string{"NAME", "SERVERS", "AGENTS", "LOADBALANCER"} // TODO: getCluster: add status column
|
||||
if flags.token {
|
||||
headers = append(headers, "TOKEN")
|
||||
}
|
||||
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
|
||||
if err != nil {
|
||||
l.Log().Fatalln("Failed to print headers")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
k3cluster.SortClusters(clusters)
|
||||
|
||||
for _, cluster := range clusters {
|
||||
serverCount, serversRunning := cluster.ServerCountRunning()
|
||||
agentCount, agentsRunning := cluster.AgentCountRunning()
|
||||
hasLB := cluster.HasLoadBalancer()
|
||||
|
||||
if outputFormat == "json" || outputFormat == "yaml" {
|
||||
entry := jsonOutput{
|
||||
Cluster: *cluster,
|
||||
ServersRunning: serversRunning,
|
||||
ServersCount: serverCount,
|
||||
AgentsRunning: agentsRunning,
|
||||
AgentsCount: agentCount,
|
||||
LoadBalancer: hasLB,
|
||||
}
|
||||
|
||||
if !flags.token {
|
||||
entry.Token = ""
|
||||
}
|
||||
|
||||
// clear some things
|
||||
entry.ExternalDatastore = nil
|
||||
|
||||
jsonOutputEntries = append(jsonOutputEntries, entry)
|
||||
} else {
|
||||
if flags.token {
|
||||
fmt.Fprintf(tabwriter, "%s\t%d/%d\t%d/%d\t%t\t%s\n", cluster.Name, serversRunning, serverCount, agentsRunning, agentCount, hasLB, cluster.Token)
|
||||
} else {
|
||||
fmt.Fprintf(tabwriter, "%s\t%d/%d\t%d/%d\t%t\n", cluster.Name, serversRunning, serverCount, agentsRunning, agentCount, hasLB)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if outputFormat == "json" {
|
||||
b, err := json.Marshal(jsonOutputEntries)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
} else if outputFormat == "yaml" {
|
||||
b, err := yaml.Marshal(jsonOutputEntries)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
}
|
||||
}
|
@ -1,110 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
)
|
||||
|
||||
// NewCmdClusterStart returns a new cobra command that starts one or more
// existing (stopped) k3d clusters.
func NewCmdClusterStart() *cobra.Command {

	// shared options struct; flag vars below and the Run closure both write to it
	startClusterOpts := types.ClusterStartOpts{
		Intent: k3d.IntentClusterStart,
	}

	// create new command
	cmd := &cobra.Command{
		Use:               "start [NAME [NAME...] | --all]",
		Long:              `Start existing k3d cluster(s)`,
		Short:             "Start existing k3d cluster(s)",
		ValidArgsFunction: util.ValidArgsAvailableClusters,
		Run: func(cmd *cobra.Command, args []string) {
			// resolve args / --all into the clusters to start
			clusters := parseStartClusterCmd(cmd, args)
			if len(clusters) == 0 {
				l.Log().Infoln("No clusters found")
			} else {
				for _, c := range clusters {
					// gather per-cluster environment info before starting
					envInfo, err := client.GatherEnvironmentInfo(cmd.Context(), runtimes.SelectedRuntime, c)
					if err != nil {
						l.Log().Fatalf("failed to gather info about cluster environment: %v", err)
					}
					// note: the shared opts struct is updated for each cluster in turn
					startClusterOpts.EnvironmentInfo = envInfo
					if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
						l.Log().Fatalln(err)
					}
					l.Log().Infof("Started cluster '%s'", c.Name)
				}
			}
		},
	}

	// add flags
	cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
	cmd.Flags().BoolVar(&startClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) (and loadbalancer) to be ready before returning.")
	cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")

	// add subcommands

	// done
	return cmd
}
|
||||
|
||||
// parseStartClusterCmd parses the command input into variables required to start clusters
|
||||
func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
|
||||
// --all
|
||||
var clusters []*k3d.Cluster
|
||||
|
||||
if all, err := cmd.Flags().GetBool("all"); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
} else if all {
|
||||
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
return clusters
|
||||
}
|
||||
|
||||
clusternames := []string{k3d.DefaultClusterName}
|
||||
if len(args) != 0 {
|
||||
clusternames = args
|
||||
}
|
||||
|
||||
for _, name := range clusternames {
|
||||
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
clusters = append(clusters, cluster)
|
||||
}
|
||||
|
||||
return clusters
|
||||
}
|
@ -1,95 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
)
|
||||
|
||||
// NewCmdClusterStop returns a new cobra command
|
||||
func NewCmdClusterStop() *cobra.Command {
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "stop [NAME [NAME...] | --all]",
|
||||
Short: "Stop existing k3d cluster(s)",
|
||||
Long: `Stop existing k3d cluster(s).`,
|
||||
ValidArgsFunction: util.ValidArgsAvailableClusters,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
clusters := parseStopClusterCmd(cmd, args)
|
||||
if len(clusters) == 0 {
|
||||
l.Log().Infoln("No clusters found")
|
||||
} else {
|
||||
for _, c := range clusters {
|
||||
if err := client.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolP("all", "a", false, "Stop all existing clusters")
|
||||
|
||||
// add subcommands
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseStopClusterCmd parses the command input into variables required to start clusters
|
||||
func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
|
||||
// --all
|
||||
var clusters []*k3d.Cluster
|
||||
|
||||
if all, err := cmd.Flags().GetBool("all"); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
} else if all {
|
||||
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
return clusters
|
||||
}
|
||||
|
||||
clusternames := []string{k3d.DefaultClusterName}
|
||||
if len(args) != 0 {
|
||||
clusternames = args
|
||||
}
|
||||
|
||||
for _, name := range clusternames {
|
||||
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
clusters = append(clusters, cluster)
|
||||
}
|
||||
|
||||
return clusters
|
||||
}
|
@ -1,47 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdConfig returns a new cobra command
|
||||
func NewCmdConfig() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "config",
|
||||
Short: "Work with config file(s)",
|
||||
Long: `Work with config file(s)`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
cmd.AddCommand(NewCmdConfigInit(), NewCmdConfigMigrate())
|
||||
|
||||
return cmd
|
||||
}
|
@ -1,77 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
config "github.com/rancher/k3d/v5/pkg/config/v1alpha3"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdConfigInit returns a new cobra command
|
||||
func NewCmdConfigInit() *cobra.Command {
|
||||
var output string
|
||||
var force bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "init",
|
||||
Aliases: []string{"create"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
l.Log().Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
|
||||
if output == "-" {
|
||||
fmt.Println(config.DefaultConfig)
|
||||
} else {
|
||||
// check if file exists
|
||||
var file *os.File
|
||||
var err error
|
||||
_, err = os.Stat(output)
|
||||
if os.IsNotExist(err) || force {
|
||||
// create/overwrite file
|
||||
file, err = os.Create(output)
|
||||
if err != nil {
|
||||
l.Log().Fatalf("Failed to create/overwrite output file: %s", err)
|
||||
}
|
||||
// write content
|
||||
if _, err = file.WriteString(config.DefaultConfig); err != nil {
|
||||
l.Log().Fatalf("Failed to write to output file: %+v", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
l.Log().Fatalf("Failed to stat output file: %+v", err)
|
||||
} else {
|
||||
l.Log().Errorln("Output file exists and --force was not set")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVarP(&output, "output", "o", "k3d-default.yaml", "Write a default k3d config")
|
||||
if err := cmd.MarkFlagFilename("output", "yaml", "yml"); err != nil {
|
||||
l.Log().Fatalf("Failed to mark flag 'output' as filename flag: %v", err)
|
||||
}
|
||||
cmd.Flags().BoolVarP(&force, "force", "f", false, "Force overwrite of target file")
|
||||
|
||||
return cmd
|
||||
}
|
@ -1,112 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/config"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// NewCmdConfigMigrate returns a new cobra command
|
||||
func NewCmdConfigMigrate() *cobra.Command {
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "migrate INPUT [OUTPUT]",
|
||||
Aliases: []string{"update"},
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
configFile := args[0]
|
||||
|
||||
if _, err := os.Stat(configFile); err != nil {
|
||||
l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
cfgViper := viper.New()
|
||||
cfgViper.SetConfigType("yaml")
|
||||
|
||||
cfgViper.SetConfigFile(configFile)
|
||||
|
||||
// try to read config into memory (viper map structure)
|
||||
if err := cfgViper.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
|
||||
}
|
||||
// config file found but some other error happened
|
||||
l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
|
||||
if err != nil {
|
||||
l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
if err := config.ValidateSchemaFile(configFile, schema); err != nil {
|
||||
l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
|
||||
}
|
||||
|
||||
l.Log().Infof("Using config file %s (%s#%s)", cfgViper.ConfigFileUsed(), strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
|
||||
|
||||
cfg, err := config.FromViper(cfgViper)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
if cfg.GetAPIVersion() != config.DefaultConfigApiVersion {
|
||||
cfg, err = config.Migrate(cfg, config.DefaultConfigApiVersion)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
yamlout, err := yaml.Marshal(cfg)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
output := "-"
|
||||
|
||||
if len(args) > 1 {
|
||||
output = args[1]
|
||||
}
|
||||
|
||||
if output == "-" {
|
||||
if _, err := os.Stdout.Write(yamlout); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
if err := os.WriteFile(output, yamlout, os.ModePerm); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
@ -1,41 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// NewCmdConfig returns a new cobra command
|
||||
func NewCmdConfigView() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "view",
|
||||
Aliases: []string{"show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Printf("%+v", viper.AllSettings())
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
@ -1,93 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package debug
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// NewCmdDebug returns a new cobra command
|
||||
func NewCmdDebug() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "debug",
|
||||
Hidden: true,
|
||||
Short: "Debug k3d cluster(s)",
|
||||
Long: `Debug k3d cluster(s)`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
cmd.AddCommand(NewCmdDebugLoadbalancer())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func NewCmdDebugLoadbalancer() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "loadbalancer",
|
||||
Aliases: []string{"lb"},
|
||||
Short: "Debug the loadbalancer",
|
||||
Long: `Debug the loadbalancer`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
cmd.AddCommand(&cobra.Command{
|
||||
Use: "get-config CLUSTERNAME",
|
||||
Args: cobra.ExactArgs(1), // cluster name
|
||||
ValidArgsFunction: util.ValidArgsAvailableClusters,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &types.Cluster{Name: args[0]})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
lbconf, err := client.GetLoadbalancerConfig(cmd.Context(), runtimes.SelectedRuntime, c)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
yamlized, err := yaml.Marshal(lbconf)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
fmt.Println(string(yamlized))
|
||||
},
|
||||
})
|
||||
|
||||
return cmd
|
||||
}
|
@ -1,53 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package image
|
||||
|
||||
import (
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdImage returns a new cobra command
|
||||
func NewCmdImage() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "image",
|
||||
Aliases: []string{"images"},
|
||||
Short: "Handle container images.",
|
||||
Long: `Handle container images.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
cmd.AddCommand(NewCmdImageImport())
|
||||
|
||||
// add flags
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,116 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package image
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
)
|
||||
|
||||
// NewCmdImageImport returns a new cobra command
|
||||
func NewCmdImageImport() *cobra.Command {
|
||||
|
||||
loadImageOpts := k3d.ImageImportOpts{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "import [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]]",
|
||||
Short: "Import image(s) from docker into k3d cluster(s).",
|
||||
Long: `Import image(s) from docker into k3d cluster(s).
|
||||
|
||||
If an IMAGE starts with the prefix 'docker.io/', then this prefix is stripped internally.
|
||||
That is, 'docker.io/rancher/k3d-tools:latest' is treated as 'rancher/k3d-tools:latest'.
|
||||
|
||||
If an IMAGE starts with the prefix 'library/' (or 'docker.io/library/'), then this prefix is stripped internally.
|
||||
That is, 'library/busybox:latest' (or 'docker.io/library/busybox:latest') are treated as 'busybox:latest'.
|
||||
|
||||
If an IMAGE does not have a version tag, then ':latest' is assumed.
|
||||
That is, 'rancher/k3d-tools' is treated as 'rancher/k3d-tools:latest'.
|
||||
|
||||
A file ARCHIVE always takes precedence.
|
||||
So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of the IMAGE of the same name.`,
|
||||
Aliases: []string{"load"},
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
images, clusters := parseLoadImageCmd(cmd, args)
|
||||
l.Log().Debugf("Importing image(s) [%+v] from runtime [%s] into cluster(s) [%+v]...", images, runtimes.SelectedRuntime, clusters)
|
||||
errOccured := false
|
||||
for _, cluster := range clusters {
|
||||
l.Log().Infof("Importing image(s) into cluster '%s'", cluster.Name)
|
||||
if err := client.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
|
||||
l.Log().Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
|
||||
errOccured = true
|
||||
}
|
||||
}
|
||||
if errOccured {
|
||||
l.Log().Warnln("At least one error occured while trying to import the image(s) into the selected cluster(s)")
|
||||
os.Exit(1)
|
||||
}
|
||||
l.Log().Infof("Successfully imported %d image(s) into %d cluster(s)", len(images), len(clusters))
|
||||
},
|
||||
}
|
||||
|
||||
/*********
|
||||
* Flags *
|
||||
*********/
|
||||
cmd.Flags().StringArrayP("cluster", "c", []string{k3d.DefaultClusterName}, "Select clusters to load the image to.")
|
||||
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
|
||||
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVarP(&loadImageOpts.KeepTar, "keep-tarball", "k", false, "Do not delete the tarball containing the saved images from the shared volume")
|
||||
cmd.Flags().BoolVarP(&loadImageOpts.KeepToolsNode, "keep-tools", "t", false, "Do not delete the tools node after import")
|
||||
|
||||
/* Subcommands */
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseLoadImageCmd parses the command input into variables required to create a cluster
|
||||
func parseLoadImageCmd(cmd *cobra.Command, args []string) ([]string, []k3d.Cluster) {
|
||||
|
||||
// --cluster
|
||||
clusterNames, err := cmd.Flags().GetStringArray("cluster")
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
clusters := []k3d.Cluster{}
|
||||
for _, clusterName := range clusterNames {
|
||||
clusters = append(clusters, k3d.Cluster{Name: clusterName})
|
||||
}
|
||||
|
||||
// images
|
||||
images := args
|
||||
if len(images) == 0 {
|
||||
l.Log().Fatalln("No images specified!")
|
||||
}
|
||||
|
||||
return images, clusters
|
||||
}
|
@ -1,52 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package kubeconfig
|
||||
|
||||
import (
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdKubeconfig returns a new cobra command
|
||||
func NewCmdKubeconfig() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "kubeconfig",
|
||||
Short: "Manage kubeconfig(s)",
|
||||
Long: `Manage kubeconfig(s)`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
cmd.AddCommand(NewCmdKubeconfigGet(), NewCmdKubeconfigMerge())
|
||||
|
||||
// add flags
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,107 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package kubeconfig
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type getKubeconfigFlags struct {
|
||||
all bool
|
||||
}
|
||||
|
||||
// NewCmdKubeconfigGet returns a new cobra command
|
||||
func NewCmdKubeconfigGet() *cobra.Command {
|
||||
|
||||
writeKubeConfigOptions := client.WriteKubeConfigOptions{
|
||||
UpdateExisting: true,
|
||||
UpdateCurrentContext: true,
|
||||
OverwriteExisting: true,
|
||||
}
|
||||
|
||||
getKubeconfigFlags := getKubeconfigFlags{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "get [CLUSTER [CLUSTER [...]] | --all]",
|
||||
Short: "Print kubeconfig(s) from cluster(s).",
|
||||
Long: `Print kubeconfig(s) from cluster(s).`,
|
||||
Aliases: []string{"print", "show"},
|
||||
ValidArgsFunction: util.ValidArgsAvailableClusters,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if (len(args) < 1 && !getKubeconfigFlags.all) || (len(args) > 0 && getKubeconfigFlags.all) {
|
||||
return fmt.Errorf("Need to specify one or more cluster names *or* set `--all` flag")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var clusters []*k3d.Cluster
|
||||
var err error
|
||||
|
||||
// generate list of clusters
|
||||
if getKubeconfigFlags.all {
|
||||
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
for _, clusterName := range args {
|
||||
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
clusters = append(clusters, retrievedCluster)
|
||||
}
|
||||
}
|
||||
|
||||
// get kubeconfigs from all clusters
|
||||
errorGettingKubeconfig := false
|
||||
for _, c := range clusters {
|
||||
l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
|
||||
fmt.Println("---") // YAML document separator
|
||||
if _, err := client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
|
||||
l.Log().Errorln(err)
|
||||
errorGettingKubeconfig = true
|
||||
}
|
||||
}
|
||||
|
||||
// return with non-zero exit code, if there was an error for one of the clusters
|
||||
if errorGettingKubeconfig {
|
||||
os.Exit(1)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolVarP(&getKubeconfigFlags.all, "all", "a", false, "Output kubeconfigs from all existing clusters")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,139 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package kubeconfig
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
k3dutil "github.com/rancher/k3d/v5/pkg/util"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
type mergeKubeconfigFlags struct {
|
||||
all bool
|
||||
output string
|
||||
targetDefault bool
|
||||
}
|
||||
|
||||
// NewCmdKubeconfigMerge returns a new cobra command
|
||||
func NewCmdKubeconfigMerge() *cobra.Command {
|
||||
|
||||
writeKubeConfigOptions := client.WriteKubeConfigOptions{}
|
||||
|
||||
mergeKubeconfigFlags := mergeKubeconfigFlags{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "merge [CLUSTER [CLUSTER [...]] | --all]",
|
||||
Aliases: []string{"write"},
|
||||
Long: `Write/Merge kubeconfig(s) from cluster(s) into new or existing kubeconfig/file.`,
|
||||
Short: "Write/Merge kubeconfig(s) from cluster(s) into new or existing kubeconfig/file.",
|
||||
ValidArgsFunction: util.ValidArgsAvailableClusters,
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var clusters []*k3d.Cluster
|
||||
var err error
|
||||
|
||||
if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
|
||||
l.Log().Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
|
||||
}
|
||||
|
||||
// generate list of clusters
|
||||
if mergeKubeconfigFlags.all {
|
||||
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
|
||||
clusternames := []string{k3d.DefaultClusterName}
|
||||
if len(args) != 0 {
|
||||
clusternames = args
|
||||
}
|
||||
|
||||
for _, clusterName := range clusternames {
|
||||
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
clusters = append(clusters, retrievedCluster)
|
||||
}
|
||||
}
|
||||
|
||||
// get kubeconfigs from all clusters
|
||||
errorGettingKubeconfig := false
|
||||
var outputs []string
|
||||
outputDir, err := k3dutil.GetConfigDirOrCreate()
|
||||
if err != nil {
|
||||
l.Log().Errorln(err)
|
||||
l.Log().Fatalln("Failed to save kubeconfig to local directory")
|
||||
}
|
||||
for _, c := range clusters {
|
||||
l.Log().Debugf("Getting kubeconfig for cluster '%s'", c.Name)
|
||||
output := mergeKubeconfigFlags.output
|
||||
if output == "" && !mergeKubeconfigFlags.targetDefault {
|
||||
output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
|
||||
}
|
||||
output, err = client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
|
||||
if err != nil {
|
||||
l.Log().Errorln(err)
|
||||
errorGettingKubeconfig = true
|
||||
} else {
|
||||
outputs = append(outputs, output)
|
||||
}
|
||||
}
|
||||
|
||||
// only print kubeconfig file path if output is not stdout ("-")
|
||||
if mergeKubeconfigFlags.output != "-" {
|
||||
fmt.Println(strings.Join(outputs, ":"))
|
||||
}
|
||||
|
||||
// return with non-zero exit code, if there was an error for one of the clusters
|
||||
if errorGettingKubeconfig {
|
||||
os.Exit(1)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
cmd.Flags().StringVarP(&mergeKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
|
||||
if err := cmd.MarkFlagFilename("output"); err != nil {
|
||||
l.Log().Fatalln("Failed to mark flag --output as filename")
|
||||
}
|
||||
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "kubeconfig-merge-default", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
|
||||
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
|
||||
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "kubeconfig-switch-context", "s", true, "Switch to new context")
|
||||
cmd.Flags().BoolVar(&writeKubeConfigOptions.OverwriteExisting, "overwrite", false, "[Careful!] Overwrite existing file, ignoring its contents")
|
||||
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,57 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdNode returns a new cobra command
|
||||
func NewCmdNode() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "node",
|
||||
Short: "Manage node(s)",
|
||||
Long: `Manage node(s)`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
cmd.AddCommand(NewCmdNodeCreate(),
|
||||
NewCmdNodeStart(),
|
||||
NewCmdNodeStop(),
|
||||
NewCmdNodeDelete(),
|
||||
NewCmdNodeList(),
|
||||
NewCmdNodeEdit())
|
||||
|
||||
// add flags
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,200 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
dockerunits "github.com/docker/go-units"
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
cliutil "github.com/rancher/k3d/v5/cmd/util"
|
||||
k3dc "github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/rancher/k3d/v5/version"
|
||||
)
|
||||
|
||||
// NewCmdNodeCreate returns a new cobra command
|
||||
func NewCmdNodeCreate() *cobra.Command {
|
||||
|
||||
createNodeOpts := k3d.NodeCreateOpts{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "create NAME",
|
||||
Short: "Create a new k3s node in docker",
|
||||
Long: `Create a new containerized k3s node (k3s in docker).`,
|
||||
Args: cobra.ExactArgs(1), // exactly one name accepted // TODO: if not specified, inherit from cluster that the node shall belong to, if that is specified
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
nodes, clusterName := parseCreateNodeCmd(cmd, args)
|
||||
if strings.HasPrefix(clusterName, "https://") {
|
||||
l.Log().Infof("Adding %d node(s) to the remote cluster '%s'...", len(nodes), clusterName)
|
||||
if err := k3dc.NodeAddToClusterMultiRemote(cmd.Context(), runtimes.SelectedRuntime, nodes, clusterName, createNodeOpts); err != nil {
|
||||
l.Log().Fatalf("failed to add %d node(s) to the remote cluster '%s': %v", len(nodes), clusterName, err)
|
||||
}
|
||||
} else {
|
||||
l.Log().Infof("Adding %d node(s) to the runtime local cluster '%s'...", len(nodes), clusterName)
|
||||
if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, &k3d.Cluster{Name: clusterName}, createNodeOpts); err != nil {
|
||||
l.Log().Fatalf("failed to add %d node(s) to the runtime local cluster '%s': %v", len(nodes), clusterName, err)
|
||||
}
|
||||
}
|
||||
l.Log().Infof("Successfully created %d node(s)!", len(nodes))
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
|
||||
cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
|
||||
if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
|
||||
l.Log().Fatalln("Failed to register flag completion for '--role'", err)
|
||||
}
|
||||
cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Cluster URL or k3d cluster name to connect to.")
|
||||
if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
|
||||
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
|
||||
}
|
||||
|
||||
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
|
||||
cmd.Flags().String("memory", "", "Memory limit imposed on the node [From docker]")
|
||||
|
||||
cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", true, "Wait for the node(s) to be ready before returning.")
|
||||
cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")
|
||||
|
||||
cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"")
|
||||
cmd.Flags().StringSliceP("k3s-node-label", "", []string{}, "Specify k3s node labels in format \"foo=bar\"")
|
||||
|
||||
cmd.Flags().StringSliceP("network", "n", []string{}, "Add node to (another) runtime network")
|
||||
|
||||
cmd.Flags().StringVarP(&createNodeOpts.ClusterToken, "token", "t", "", "Override cluster token (required when connecting to an external cluster)")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseCreateNodeCmd parses the command input into variables required to create a node
|
||||
func parseCreateNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, string) {
|
||||
|
||||
// --replicas
|
||||
replicas, err := cmd.Flags().GetInt("replicas")
|
||||
if err != nil {
|
||||
l.Log().Errorln("No replica count specified")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// --role
|
||||
roleStr, err := cmd.Flags().GetString("role")
|
||||
if err != nil {
|
||||
l.Log().Errorln("No node role specified")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
if _, ok := k3d.NodeRoles[roleStr]; !ok {
|
||||
l.Log().Fatalf("Unknown node role '%s'\n", roleStr)
|
||||
}
|
||||
role := k3d.NodeRoles[roleStr]
|
||||
|
||||
// --image
|
||||
image, err := cmd.Flags().GetString("image")
|
||||
if err != nil {
|
||||
l.Log().Errorln("No image specified")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// --cluster
|
||||
clusterName, err := cmd.Flags().GetString("cluster")
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// --memory
|
||||
memory, err := cmd.Flags().GetString("memory")
|
||||
if err != nil {
|
||||
l.Log().Errorln("No memory specified")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
if _, err := dockerunits.RAMInBytes(memory); memory != "" && err != nil {
|
||||
l.Log().Errorf("Provided memory limit value is invalid")
|
||||
}
|
||||
|
||||
// --runtime-label
|
||||
runtimeLabelsFlag, err := cmd.Flags().GetStringSlice("runtime-label")
|
||||
if err != nil {
|
||||
l.Log().Errorln("No runtime-label specified")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
runtimeLabels := make(map[string]string, len(runtimeLabelsFlag)+1)
|
||||
for _, label := range runtimeLabelsFlag {
|
||||
labelSplitted := strings.Split(label, "=")
|
||||
if len(labelSplitted) != 2 {
|
||||
l.Log().Fatalf("unknown runtime-label format format: %s, use format \"foo=bar\"", label)
|
||||
}
|
||||
cliutil.ValidateRuntimeLabelKey(labelSplitted[0])
|
||||
runtimeLabels[labelSplitted[0]] = labelSplitted[1]
|
||||
}
|
||||
|
||||
// Internal k3d runtime labels take precedence over user-defined labels
|
||||
runtimeLabels[k3d.LabelRole] = roleStr
|
||||
|
||||
// --k3s-node-label
|
||||
k3sNodeLabelsFlag, err := cmd.Flags().GetStringSlice("k3s-node-label")
|
||||
if err != nil {
|
||||
l.Log().Errorln("No k3s-node-label specified")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
k3sNodeLabels := make(map[string]string, len(k3sNodeLabelsFlag))
|
||||
for _, label := range k3sNodeLabelsFlag {
|
||||
labelSplitted := strings.Split(label, "=")
|
||||
if len(labelSplitted) != 2 {
|
||||
l.Log().Fatalf("unknown k3s-node-label format format: %s, use format \"foo=bar\"", label)
|
||||
}
|
||||
k3sNodeLabels[labelSplitted[0]] = labelSplitted[1]
|
||||
}
|
||||
|
||||
// --network
|
||||
networks, err := cmd.Flags().GetStringSlice("network")
|
||||
if err != nil {
|
||||
l.Log().Fatalf("failed to get --network string slice flag: %v", err)
|
||||
}
|
||||
|
||||
// generate list of nodes
|
||||
nodes := []*k3d.Node{}
|
||||
for i := 0; i < replicas; i++ {
|
||||
node := &k3d.Node{
|
||||
Name: fmt.Sprintf("%s-%s-%d", k3d.DefaultObjectNamePrefix, args[0], i),
|
||||
Role: role,
|
||||
Image: image,
|
||||
K3sNodeLabels: k3sNodeLabels,
|
||||
RuntimeLabels: runtimeLabels,
|
||||
Restart: true,
|
||||
Memory: memory,
|
||||
Networks: networks,
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
return nodes, clusterName
|
||||
}
|
@ -1,114 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// nodeDeleteFlags holds the flag values for `k3d node delete`.
type nodeDeleteFlags struct {
	All               bool // --all: delete all existing nodes
	IncludeRegistries bool // --registries: also delete registry nodes
}
|
||||
|
||||
// NewCmdNodeDelete returns a new cobra command
|
||||
func NewCmdNodeDelete() *cobra.Command {
|
||||
|
||||
flags := nodeDeleteFlags{}
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "delete (NAME | --all)",
|
||||
Short: "Delete node(s).",
|
||||
Long: `Delete node(s).`,
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
nodes := parseDeleteNodeCmd(cmd, args, &flags)
|
||||
nodeDeleteOpts := k3d.NodeDeleteOpts{SkipLBUpdate: flags.All} // do not update LB, if we're deleting all nodes anyway
|
||||
|
||||
if len(nodes) == 0 {
|
||||
l.Log().Infoln("No nodes found")
|
||||
} else {
|
||||
for _, node := range nodes {
|
||||
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, nodeDeleteOpts); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
l.Log().Infof("Successfully deleted %d node(s)!", len(nodes))
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolVarP(&flags.All, "all", "a", false, "Delete all existing nodes")
|
||||
cmd.Flags().BoolVarP(&flags.IncludeRegistries, "registries", "r", false, "Also delete registries")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseDeleteNodeCmd parses the command input into variables required to delete nodes
|
||||
func parseDeleteNodeCmd(cmd *cobra.Command, args []string, flags *nodeDeleteFlags) []*k3d.Node {
|
||||
|
||||
var nodes []*k3d.Node
|
||||
var err error
|
||||
|
||||
// --all
|
||||
if flags.All {
|
||||
if !flags.IncludeRegistries {
|
||||
l.Log().Infoln("Didn't set '--registries', so won't delete registries.")
|
||||
}
|
||||
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
include := k3d.ClusterInternalNodeRoles
|
||||
exclude := []k3d.Role{}
|
||||
if flags.IncludeRegistries {
|
||||
include = append(include, k3d.RegistryRole)
|
||||
}
|
||||
nodes = client.NodeFilterByRoles(nodes, include, exclude)
|
||||
return nodes
|
||||
}
|
||||
|
||||
if !flags.All && len(args) < 1 {
|
||||
l.Log().Fatalln("Expecting at least one node name if `--all` is not set")
|
||||
}
|
||||
|
||||
for _, name := range args {
|
||||
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
|
@ -1,113 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdNodeEdit returns a new cobra command
|
||||
func NewCmdNodeEdit() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "edit NODE",
|
||||
Short: "[EXPERIMENTAL] Edit node(s).",
|
||||
Long: `[EXPERIMENTAL] Edit node(s).`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
Aliases: []string{"update"},
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
existingNode, changeset := parseEditNodeCmd(cmd, args)
|
||||
|
||||
l.Log().Debugf("===== Current =====\n%+v\n===== Changeset =====\n%+v\n", existingNode, changeset)
|
||||
|
||||
if err := client.NodeEdit(cmd.Context(), runtimes.SelectedRuntime, existingNode, changeset); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
l.Log().Infof("Successfully updated %s", existingNode.Name)
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
|
||||
// add flags
|
||||
cmd.Flags().StringArray("port-add", nil, "[EXPERIMENTAL] (serverlb only!) Map ports from the node container to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d node edit k3d-mycluster-serverlb --port-add 8080:80`")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseEditNodeCmd parses the command input into variables required to delete nodes
|
||||
// parseEditNodeCmd parses the command input into the existing node and a
// changeset node describing the requested modifications.
// NOTE(review): on a missing node or a flag-read error this returns (nil, nil),
// which flows back into the edit command's Run handler — confirm the caller
// tolerates nil before relying on it.
func parseEditNodeCmd(cmd *cobra.Command, args []string) (*k3d.Node, *k3d.Node) {

	existingNode, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: args[0]})
	if err != nil {
		l.Log().Fatalln(err)
	}

	if existingNode == nil {
		l.Log().Infof("Node %s not found", args[0])
		return nil, nil
	}

	// only the loadbalancer node supports editing for now
	if existingNode.Role != k3d.LoadBalancerRole {
		l.Log().Fatalln("Currently only the loadbalancer can be updated!")
	}

	changeset := &k3d.Node{}

	/*
	 * --port-add
	 */
	portFlags, err := cmd.Flags().GetStringArray("port-add")
	if err != nil {
		l.Log().Errorln(err)
		return nil, nil
	}

	// init portmap
	changeset.Ports = nat.PortMap{}

	for _, flag := range portFlags {

		// one --port-add value may expand into several port mappings
		portmappings, err := nat.ParsePortSpec(flag)
		if err != nil {
			l.Log().Fatalf("Failed to parse port spec '%s': %+v", flag, err)
		}

		for _, pm := range portmappings {
			changeset.Ports[pm.Port] = append(changeset.Ports[pm.Port], pm.Binding)
		}
	}

	return existingNode, changeset
}
|
@ -1,103 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// nodeListFlags holds the flag values for `k3d node list`.
type nodeListFlags struct {
	noHeader bool   // --no-headers: suppress the header row
	output   string // --output: output format, one of json|yaml (empty = table)
}
|
||||
|
||||
// NewCmdNodeList returns a new cobra command
|
||||
// NewCmdNodeList returns a new cobra command that lists all nodes or the
// nodes named on the command line.
func NewCmdNodeList() *cobra.Command {
	nodeListFlags := nodeListFlags{}

	// create new command
	cmd := &cobra.Command{
		Use:               "list [NODE [NODE...]]",
		Aliases:           []string{"ls", "get"},
		Short:             "List node(s)",
		Long:              `List node(s).`,
		Args:              cobra.MinimumNArgs(0), // 0 or more; 0 = all
		ValidArgsFunction: util.ValidArgsAvailableNodes,
		Run: func(cmd *cobra.Command, args []string) {
			// wrap each given name in a node stub used for lookup below
			nodes := []*k3d.Node{}
			for _, name := range args {
				nodes = append(nodes, &k3d.Node{
					Name: name,
				})
			}

			var existingNodes []*k3d.Node
			if len(nodes) == 0 { // Option a) no name specified -> get all nodes
				found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
				if err != nil {
					l.Log().Fatalln(err)
				}
				existingNodes = append(existingNodes, found...)
			} else { // Option b) node name(s) specified -> get those specific nodes
				for _, node := range nodes {
					found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
					if err != nil {
						l.Log().Fatalln(err)
					}
					existingNodes = append(existingNodes, found)
				}
			}

			// print existing nodes
			headers := &[]string{}
			if !nodeListFlags.noHeader {
				headers = &[]string{"NAME", "ROLE", "CLUSTER", "STATUS"}
			}

			// table row: name (leading "/" stripped), role, owning cluster, status
			util.PrintNodes(existingNodes, nodeListFlags.output,
				headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) {
					fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n",
						strings.TrimPrefix(node.Name, "/"),
						string(node.Role),
						node.RuntimeLabels[k3d.LabelClusterName],
						node.State.Status)
				}))
		},
	}
	// add flags
	cmd.Flags().BoolVar(&nodeListFlags.noHeader, "no-headers", false, "Disable headers")
	cmd.Flags().StringVarP(&nodeListFlags.output, "output", "o", "", "Output format. One of: json|yaml")

	// add subcommands

	// done
	return cmd
}
|
@ -1,61 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdNodeStart returns a new cobra command
|
||||
func NewCmdNodeStart() *cobra.Command {
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "start NODE", // TODO: startNode: allow one or more names or --all
|
||||
Short: "Start an existing k3d node",
|
||||
Long: `Start an existing k3d node.`,
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
node := parseStartNodeCmd(cmd, args)
|
||||
if err := runtimes.SelectedRuntime.StartNode(cmd.Context(), node); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseStartNodeCmd parses the command input into variables required to start a node
|
||||
func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
|
||||
// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
|
||||
if len(args) == 0 || len(args[0]) == 0 {
|
||||
l.Log().Fatalln("No node name given")
|
||||
}
|
||||
|
||||
return &k3d.Node{Name: args[0]}
|
||||
}
|
@ -1,62 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
)
|
||||
|
||||
// NewCmdNodeStop returns a new cobra command
|
||||
func NewCmdNodeStop() *cobra.Command {
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "stop NAME", // TODO: stopNode: allow one or more names or --all",
|
||||
Short: "Stop an existing k3d node",
|
||||
Long: `Stop an existing k3d node.`,
|
||||
ValidArgsFunction: util.ValidArgsAvailableNodes,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
node := parseStopNodeCmd(cmd, args)
|
||||
if err := runtimes.SelectedRuntime.StopNode(cmd.Context(), node); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseStopNodeCmd parses the command input into variables required to stop a node
|
||||
func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
|
||||
// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
|
||||
if len(args) == 0 || len(args[0]) == 0 {
|
||||
l.Log().Fatalln("No node name given")
|
||||
}
|
||||
|
||||
return &k3d.Node{Name: args[0]}
|
||||
}
|
@ -1,57 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewCmdRegistry returns a new cobra command
|
||||
func NewCmdRegistry() *cobra.Command {
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "registry",
|
||||
Aliases: []string{"registries", "reg"},
|
||||
Short: "Manage registry/registries",
|
||||
Long: `Manage registry/registries`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := cmd.Help(); err != nil {
|
||||
l.Log().Errorln("Couldn't get help text")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
cmd.AddCommand(NewCmdRegistryCreate(),
|
||||
NewCmdRegistryStart(),
|
||||
NewCmdRegistryStop(),
|
||||
NewCmdRegistryDelete(),
|
||||
NewCmdRegistryList())
|
||||
|
||||
// add flags
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
@ -1,139 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
|
||||
cliutil "github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// regCreatePreProcessedFlags holds raw flag values that still need parsing
// before use (see parseCreateRegistryCmd).
type regCreatePreProcessedFlags struct {
	Port     string   // value of --port; parsed into a port exposure spec later
	Clusters []string // values of --cluster; wrapped into k3d.Cluster objects later
}
|
||||
|
||||
// regCreateFlags holds flag values for `k3d registry create` that are used directly.
type regCreateFlags struct {
	Image  string // --image/-i: container image to run the registry with
	NoHelp bool   // --no-help: suppress the how-to text printed after creation
}
|
||||
|
||||
// helptext is the how-to text printed after a successful registry creation;
// every %s placeholder is substituted with the registry's host:port string.
var helptext string = `# You can now use the registry like this (example):
# 1. create a new cluster that uses this registry
k3d cluster create --registry-use %s

# 2. tag an existing local image to be pushed to the registry
docker tag nginx:latest %s/mynginx:v0.1

# 3. push that image to the registry
docker push %s/mynginx:v0.1

# 4. run a pod that uses this image
kubectl run mynginx --image %s/mynginx:v0.1
`
|
||||
|
||||
// NewCmdRegistryCreate returns a new cobra command
|
||||
func NewCmdRegistryCreate() *cobra.Command {
|
||||
|
||||
flags := ®CreateFlags{}
|
||||
ppFlags := ®CreatePreProcessedFlags{}
|
||||
|
||||
// create new command
|
||||
cmd := &cobra.Command{
|
||||
Use: "create NAME",
|
||||
Short: "Create a new registry",
|
||||
Long: `Create a new registry.`,
|
||||
Args: cobra.MaximumNArgs(1), // maximum one name accepted
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
reg, clusters := parseCreateRegistryCmd(cmd, args, flags, ppFlags)
|
||||
regNode, err := client.RegistryRun(cmd.Context(), runtimes.SelectedRuntime, reg)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
if err := client.RegistryConnectClusters(cmd.Context(), runtimes.SelectedRuntime, regNode, clusters); err != nil {
|
||||
l.Log().Errorln(err)
|
||||
}
|
||||
l.Log().Infof("Successfully created registry '%s'", reg.Host)
|
||||
regString := fmt.Sprintf("%s:%s", reg.Host, reg.ExposureOpts.Binding.HostPort)
|
||||
if !flags.NoHelp {
|
||||
fmt.Println(fmt.Sprintf(helptext, regString, regString, regString, regString))
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add flags
|
||||
|
||||
// TODO: connecting to clusters requires non-existing config reload functionality in containerd
|
||||
cmd.Flags().StringArrayVarP(&ppFlags.Clusters, "cluster", "c", nil, "[NotReady] Select the cluster(s) that the registry shall connect to.")
|
||||
if err := cmd.RegisterFlagCompletionFunc("cluster", cliutil.ValidArgsAvailableClusters); err != nil {
|
||||
l.Log().Fatalln("Failed to register flag completion for '--cluster'", err)
|
||||
}
|
||||
if err := cmd.Flags().MarkHidden("cluster"); err != nil {
|
||||
l.Log().Fatalln("Failed to hide --cluster flag on registry create command")
|
||||
}
|
||||
|
||||
cmd.Flags().StringVarP(&flags.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultRegistryImageRepo, k3d.DefaultRegistryImageTag), "Specify image used for the registry")
|
||||
|
||||
cmd.Flags().StringVarP(&ppFlags.Port, "port", "p", "random", "Select which port the registry should be listening on on your machine (localhost) (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d registry create --port 0.0.0.0:5111`")
|
||||
|
||||
cmd.Flags().BoolVar(&flags.NoHelp, "no-help", false, "Disable the help text (How-To use the registry)")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseCreateRegistryCmd parses the command input into variables required to create a registry
|
||||
func parseCreateRegistryCmd(cmd *cobra.Command, args []string, flags *regCreateFlags, ppFlags *regCreatePreProcessedFlags) (*k3d.Registry, []*k3d.Cluster) {
|
||||
|
||||
// --cluster
|
||||
clusters := []*k3d.Cluster{}
|
||||
for _, name := range ppFlags.Clusters {
|
||||
clusters = append(clusters,
|
||||
&k3d.Cluster{
|
||||
Name: name,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// --port
|
||||
exposePort, err := cliutil.ParsePortExposureSpec(ppFlags.Port, k3d.DefaultRegistryPort)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to parse registry port")
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
|
||||
// set the name for the registry node
|
||||
registryName := ""
|
||||
if len(args) > 0 {
|
||||
registryName = fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, args[0])
|
||||
}
|
||||
|
||||
return &k3d.Registry{Host: registryName, Image: flags.Image, ExposureOpts: *exposePort}, clusters
|
||||
}
|
@ -1,102 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// registryDeleteFlags holds the flag values for `k3d registry delete`.
type registryDeleteFlags struct {
	All bool // --all/-a: delete every existing registry
}
|
||||
|
||||
// NewCmdRegistryDelete returns a new cobra command
|
||||
func NewCmdRegistryDelete() *cobra.Command {
|
||||
|
||||
flags := registryDeleteFlags{}
|
||||
|
||||
// create new cobra command
|
||||
cmd := &cobra.Command{
|
||||
Use: "delete (NAME | --all)",
|
||||
Short: "Delete registry/registries.",
|
||||
Long: `Delete registry/registries.`,
|
||||
Aliases: []string{"del", "rm"},
|
||||
ValidArgsFunction: util.ValidArgsAvailableRegistries,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
nodes := parseRegistryDeleteCmd(cmd, args, &flags)
|
||||
|
||||
if len(nodes) == 0 {
|
||||
l.Log().Infoln("No registries found")
|
||||
} else {
|
||||
for _, node := range nodes {
|
||||
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// add subcommands
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolVarP(&flags.All, "all", "a", false, "Delete all existing registries")
|
||||
|
||||
// done
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseRegistryDeleteCmd parses the command input into variables required to delete nodes
|
||||
func parseRegistryDeleteCmd(cmd *cobra.Command, args []string, flags *registryDeleteFlags) []*k3d.Node {
|
||||
|
||||
var nodes []*k3d.Node
|
||||
var err error
|
||||
|
||||
if flags.All {
|
||||
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
if !flags.All && len(args) < 1 {
|
||||
l.Log().Fatalln("Expecting at least one registry name if `--all` is not set")
|
||||
}
|
||||
|
||||
for _, name := range args {
|
||||
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
nodes = client.NodeFilterByRoles(nodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})
|
||||
|
||||
return nodes
|
||||
}
|
@ -1,113 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
"github.com/rancher/k3d/v5/cmd/util"
|
||||
"github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// registryListFlags holds the flag values for `k3d registry list`.
type registryListFlags struct {
	noHeader bool   // --no-headers: omit the table header row
	output   string // --output/-o: output format (json|yaml; empty = table)
}
|
||||
|
||||
// NewCmdRegistryList creates a new cobra command
|
||||
// NewCmdRegistryList creates a new cobra command.
// It lists either all registry nodes or only the ones named as arguments,
// printed in the chosen output format (table by default; json/yaml via -o).
func NewCmdRegistryList() *cobra.Command {
	registryListFlags := registryListFlags{}

	// create new command
	cmd := &cobra.Command{
		Use:               "list [NAME [NAME...]]",
		Aliases:           []string{"ls", "get"},
		Short:             "List registries",
		Long:              `List registries.`,
		Args:              cobra.MinimumNArgs(0), // 0 or more; 0 = all
		ValidArgsFunction: util.ValidArgsAvailableRegistries,
		Run: func(cmd *cobra.Command, args []string) {
			var existingNodes []*k3d.Node

			// wrap the requested names into node stubs for the lookup below
			nodes := []*k3d.Node{}
			for _, name := range args {
				nodes = append(nodes, &k3d.Node{
					Name: name,
				})
			}

			if len(nodes) == 0 { // Option a) no name specified -> get all registries
				found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
				if err != nil {
					l.Log().Fatalln(err)
				}
				existingNodes = append(existingNodes, found...)
			} else { // Option b) registry name(s) specified -> get specific registries
				for _, node := range nodes {
					l.Log().Tracef("Node %s", node.Name)
					found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
					if err != nil {
						l.Log().Fatalln(err)
					}
					existingNodes = append(existingNodes, found)
				}
			}
			// keep only nodes with the registry role
			existingNodes = client.NodeFilterByRoles(existingNodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})

			// print existing registries
			headers := &[]string{}
			if !registryListFlags.noHeader {
				headers = &[]string{"NAME", "ROLE", "CLUSTER", "STATUS"}
			}

			util.PrintNodes(existingNodes, registryListFlags.output,
				headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) {
					// "*" marks a registry without a cluster-name runtime label
					cluster := "*"
					if _, ok := node.RuntimeLabels[k3d.LabelClusterName]; ok {
						cluster = node.RuntimeLabels[k3d.LabelClusterName]
					}
					fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n",
						strings.TrimPrefix(node.Name, "/"),
						string(node.Role),
						cluster,
						node.State.Status,
					)
				}),
			)
		},
	}

	// add flags
	cmd.Flags().BoolVar(&registryListFlags.noHeader, "no-headers", false, "Disable headers")
	cmd.Flags().StringVarP(&registryListFlags.output, "output", "o", "", "Output format. One of: json|yaml")

	// add subcommands

	// done
	return cmd
}
|
@ -1,29 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
// NewCmdRegistryStart creates a new cobra command
|
||||
// NewCmdRegistryStart creates a new cobra command.
// NOTE(review): placeholder — returns an empty command with no Use/Run wired
// up; presumably still to be implemented. Confirm before relying on it.
func NewCmdRegistryStart() *cobra.Command {
	return &cobra.Command{}
}
|
@ -1,29 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package registry
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
// NewCmdRegistryStop creates a new cobra command
|
||||
// NewCmdRegistryStop creates a new cobra command.
// NOTE(review): placeholder — returns an empty command with no Use/Run wired
// up; presumably still to be implemented. Confirm before relying on it.
func NewCmdRegistryStop() *cobra.Command {
	return &cobra.Command{}
}
|
295
cmd/root.go
295
cmd/root.go
@ -1,295 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/rancher/k3d/v5/cmd/cluster"
|
||||
cfg "github.com/rancher/k3d/v5/cmd/config"
|
||||
"github.com/rancher/k3d/v5/cmd/debug"
|
||||
"github.com/rancher/k3d/v5/cmd/image"
|
||||
"github.com/rancher/k3d/v5/cmd/kubeconfig"
|
||||
"github.com/rancher/k3d/v5/cmd/node"
|
||||
"github.com/rancher/k3d/v5/cmd/registry"
|
||||
cliutil "github.com/rancher/k3d/v5/cmd/util"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v5/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/writer"
|
||||
)
|
||||
|
||||
// RootFlags describes a struct that holds flags that can be set on root level of the command
|
||||
// RootFlags describes a struct that holds flags that can be set on root level of the command
type RootFlags struct {
	debugLogging       bool // --verbose: enable debug-level logging
	traceLogging       bool // --trace: enable trace-level logging
	timestampedLogging bool // --timestamps: prefix log lines with timestamps
	version            bool // --version: print version info instead of usage
}
|
||||
|
||||
// flags holds the root command's global flag values, shared with initLogging.
var flags = RootFlags{}
|
||||
|
||||
// NewCmdK3d returns the root `k3d` cobra command with all subcommands,
// persistent flags and initializers attached.
func NewCmdK3d() *cobra.Command {

	// rootCmd represents the base command when called without any subcommands
	rootCmd := &cobra.Command{
		Use:   "k3d",
		Short: "https://k3d.io/ -> Run k3s in Docker!",
		Long: `https://k3d.io/
k3d is a wrapper CLI that helps you to easily create k3s clusters inside docker.
Nodes of a k3d cluster are docker containers running a k3s image.
All Nodes of a k3d cluster are part of the same docker network.`,
		Run: func(cmd *cobra.Command, args []string) {
			// bare `k3d` either prints the version (--version) or the usage text
			if flags.version {
				printVersion()
			} else {
				if err := cmd.Usage(); err != nil {
					l.Log().Fatalln(err)
				}
			}
		},
	}

	// persistent flags are inherited by all subcommands
	rootCmd.PersistentFlags().BoolVar(&flags.debugLogging, "verbose", false, "Enable verbose output (debug logging)")
	rootCmd.PersistentFlags().BoolVar(&flags.traceLogging, "trace", false, "Enable super verbose output (trace logging)")
	rootCmd.PersistentFlags().BoolVar(&flags.timestampedLogging, "timestamps", false, "Enable Log timestamps")

	// add local flags
	rootCmd.Flags().BoolVar(&flags.version, "version", false, "Show k3d and default k3s version")

	// add subcommands
	rootCmd.AddCommand(NewCmdCompletion(rootCmd),
		cluster.NewCmdCluster(),
		kubeconfig.NewCmdKubeconfig(),
		node.NewCmdNode(),
		image.NewCmdImage(),
		cfg.NewCmdConfig(),
		registry.NewCmdRegistry(),
		debug.NewCmdDebug(),
		&cobra.Command{
			Use:   "version",
			Short: "Show k3d and default k3s version",
			Long:  "Show k3d and default k3s version",
			Run: func(cmd *cobra.Command, args []string) {
				printVersion()
			},
		},
		&cobra.Command{
			Use:   "runtime-info",
			Short: "Show runtime information",
			Long:  "Show some information about the runtime environment (e.g. docker info)",
			Run: func(cmd *cobra.Command, args []string) {
				info, err := runtimes.SelectedRuntime.Info()
				if err != nil {
					l.Log().Fatalln(err)
				}
				// dump the runtime info to stdout as YAML
				err = yaml.NewEncoder(os.Stdout).Encode(info)
				if err != nil {
					l.Log().Fatalln(err)
				}
			},
			Hidden: true, // hidden from the help output
		})

	// Init: set up logging and select the runtime before any command runs
	cobra.OnInitialize(initLogging, initRuntime)

	return rootCmd
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
cmd := NewCmdK3d()
|
||||
if len(os.Args) > 1 {
|
||||
parts := os.Args[1:]
|
||||
// Check if it's a built-in command, else try to execute it as a plugin
|
||||
if _, _, err := cmd.Find(parts); err != nil {
|
||||
pluginFound, err := cliutil.HandlePlugin(context.Background(), parts)
|
||||
if err != nil {
|
||||
l.Log().Errorf("Failed to execute plugin '%+v'", parts)
|
||||
l.Log().Fatalln(err)
|
||||
} else if pluginFound {
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := cmd.Execute(); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
// initLogging initializes the logger
|
||||
func initLogging() {
|
||||
if flags.traceLogging {
|
||||
l.Log().SetLevel(logrus.TraceLevel)
|
||||
} else if flags.debugLogging {
|
||||
l.Log().SetLevel(logrus.DebugLevel)
|
||||
} else {
|
||||
switch logLevel := strings.ToUpper(os.Getenv("LOG_LEVEL")); logLevel {
|
||||
case "TRACE":
|
||||
l.Log().SetLevel(logrus.TraceLevel)
|
||||
case "DEBUG":
|
||||
l.Log().SetLevel(logrus.DebugLevel)
|
||||
case "WARN":
|
||||
l.Log().SetLevel(logrus.WarnLevel)
|
||||
case "ERROR":
|
||||
l.Log().SetLevel(logrus.ErrorLevel)
|
||||
default:
|
||||
l.Log().SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
}
|
||||
l.Log().SetOutput(io.Discard)
|
||||
l.Log().AddHook(&writer.Hook{
|
||||
Writer: os.Stderr,
|
||||
LogLevels: []logrus.Level{
|
||||
logrus.PanicLevel,
|
||||
logrus.FatalLevel,
|
||||
logrus.ErrorLevel,
|
||||
logrus.WarnLevel,
|
||||
},
|
||||
})
|
||||
l.Log().AddHook(&writer.Hook{
|
||||
Writer: os.Stdout,
|
||||
LogLevels: []logrus.Level{
|
||||
logrus.InfoLevel,
|
||||
logrus.DebugLevel,
|
||||
logrus.TraceLevel,
|
||||
},
|
||||
})
|
||||
|
||||
formatter := &logrus.TextFormatter{
|
||||
ForceColors: true,
|
||||
}
|
||||
|
||||
if flags.timestampedLogging || os.Getenv("LOG_TIMESTAMPS") != "" {
|
||||
formatter.FullTimestamp = true
|
||||
}
|
||||
|
||||
l.Log().SetFormatter(formatter)
|
||||
|
||||
}
|
||||
|
||||
func initRuntime() {
|
||||
runtime, err := runtimes.GetRuntime("docker")
|
||||
if err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
runtimes.SelectedRuntime = runtime
|
||||
if rtinfo, err := runtime.Info(); err == nil {
|
||||
l.Log().Debugf("Runtime Info:\n%+v", rtinfo)
|
||||
}
|
||||
}
|
||||
|
||||
func printVersion() {
|
||||
fmt.Printf("k3d version %s\n", version.GetVersion())
|
||||
fmt.Printf("k3s version %s (default)\n", version.K3sVersion)
|
||||
}
|
||||
|
||||
// NewCmdCompletion creates a new completion command
|
||||
// NewCmdCompletion creates a new completion command.
// It generates a shell completion script for the requested shell
// (bash, zsh, fish, powershell; "psh" is an alias for powershell).
func NewCmdCompletion(rootCmd *cobra.Command) *cobra.Command {

	// map of shell name -> generator writing the completion script to the writer
	completionFunctions := map[string]func(io.Writer) error{
		"bash": rootCmd.GenBashCompletion,
		"zsh": func(writer io.Writer) error {
			if err := rootCmd.GenZshCompletion(writer); err != nil {
				return err
			}

			// append an explicit compdef line to activate the completion
			fmt.Fprintf(writer, "\n# source completion file\ncompdef _k3d k3d\n")

			return nil
		},
		"psh":        rootCmd.GenPowerShellCompletion,
		"powershell": rootCmd.GenPowerShellCompletionWithDesc,
		"fish": func(writer io.Writer) error {
			return rootCmd.GenFishCompletion(writer, true)
		},
	}

	// create new cobra command
	cmd := &cobra.Command{
		Use:   "completion SHELL",
		Short: "Generate completion scripts for [bash, zsh, fish, powershell | psh]",
		Long: `To load completions:

Bash:

$ source <(k3d completion bash)

# To load completions for each session, execute once:
# Linux:
$ k3d completion bash > /etc/bash_completion.d/k3d
# macOS:
$ k3d completion bash > /usr/local/etc/bash_completion.d/k3d

Zsh:

# If shell completion is not already enabled in your environment,
# you will need to enable it. You can execute the following once:

$ echo "autoload -U compinit; compinit" >> ~/.zshrc

# To load completions for each session, execute once:
$ k3d completion zsh > "${fpath[1]}/k3d"

# You will need to start a new shell for this setup to take effect.

fish:

$ k3d completion fish | source

# To load completions for each session, execute once:
$ k3d completion fish > ~/.config/fish/completions/k3d.fish

PowerShell:

PS> k3d completion powershell | Out-String | Invoke-Expression

# To load completions for every new session, run:
PS> k3d completion powershell > k3d.ps1
# and source this file from your PowerShell profile.
`,
		ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
		ArgAliases:            []string{"psh"},
		DisableFlagsInUseLine: true,
		Args:                  cobra.ExactValidArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			// look up the generator for the requested shell and run it on stdout
			if completionFunc, ok := completionFunctions[args[0]]; ok {
				if err := completionFunc(os.Stdout); err != nil {
					l.Log().Fatalf("Failed to generate completion script for shell '%s'", args[0])
				}
				return
			}
			l.Log().Fatalf("Shell '%s' not supported for completion", args[0])
		},
	}
	return cmd
}
|
@ -1,124 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
k3dcluster "github.com/rancher/k3d/v5/pkg/client"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// ValidArgsAvailableClusters is used for shell completion: proposes the list of existing clusters
|
||||
func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
|
||||
var completions []string
|
||||
var clusters []*k3d.Cluster
|
||||
clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to get list of clusters for shell completion")
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
clusterLoop:
|
||||
for _, cluster := range clusters {
|
||||
for _, arg := range args {
|
||||
if arg == cluster.Name { // only clusters, that are not in the args yet
|
||||
continue clusterLoop
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(cluster.Name, toComplete) {
|
||||
completions = append(completions, cluster.Name)
|
||||
}
|
||||
}
|
||||
return completions, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// ValidArgsAvailableNodes is used for shell completion: proposes the list of existing nodes
|
||||
func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
|
||||
var completions []string
|
||||
var nodes []*k3d.Node
|
||||
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to get list of nodes for shell completion")
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
nodeLoop:
|
||||
for _, node := range nodes {
|
||||
for _, arg := range args {
|
||||
if arg == node.Name { // only nodes, that are not in the args yet
|
||||
continue nodeLoop
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(node.Name, toComplete) {
|
||||
completions = append(completions, node.Name)
|
||||
}
|
||||
}
|
||||
return completions, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// ValidArgsAvailableRegistries is used for shell completions: proposes the list of existing registries
|
||||
func ValidArgsAvailableRegistries(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
|
||||
var completions []string
|
||||
var nodes []*k3d.Node
|
||||
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
|
||||
if err != nil {
|
||||
l.Log().Errorln("Failed to get list of nodes for shell completion")
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
nodes = k3dcluster.NodeFilterByRoles(nodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})
|
||||
|
||||
nodeLoop:
|
||||
for _, node := range nodes {
|
||||
for _, arg := range args {
|
||||
if arg == node.Name { // only nodes, that are not in the args yet
|
||||
continue nodeLoop
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(node.Name, toComplete) {
|
||||
completions = append(completions, node.Name)
|
||||
}
|
||||
}
|
||||
return completions, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// ValidArgsNodeRoles is used for shell completion: proposes the list of possible node roles
|
||||
func ValidArgsNodeRoles(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
|
||||
var completions []string
|
||||
roles := []string{string(k3d.ServerRole), string(k3d.AgentRole)}
|
||||
|
||||
for _, role := range roles {
|
||||
if strings.HasPrefix(role, toComplete) {
|
||||
completions = append(completions, role)
|
||||
}
|
||||
}
|
||||
return completions, cobra.ShellCompDirectiveDefault
|
||||
}
|
@ -1,97 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/config"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// InitViperWithConfigFile wires cfgViper to the K3D_* environment prefix and,
// if configFile is non-empty, loads and schema-validates that YAML file.
// Environment variables referenced in the file are expanded into a temp copy
// before parsing, so the original file is never modified.
// NOTE: every failure path calls l.Log().Fatalf (which exits the process),
// so the returned error is effectively always nil.
func InitViperWithConfigFile(cfgViper *viper.Viper, configFile string) error {

	// viper for the general config (file, env and non pre-processed flags)
	cfgViper.SetEnvPrefix("K3D")
	cfgViper.AutomaticEnv()

	cfgViper.SetConfigType("yaml")

	// Set config file, if specified
	if configFile != "" {

		if _, err := os.Stat(configFile); err != nil {
			l.Log().Fatalf("Failed to stat config file %s: %+v", configFile, err)
		}

		// create temporary file to expand environment variables in the config without writing that back to the original file
		// we're doing it here, because this happens just before absolutely all other processing
		tmpfile, err := os.CreateTemp(os.TempDir(), fmt.Sprintf("k3d-config-tmp-%s", filepath.Base(configFile)))
		if err != nil {
			l.Log().Fatalf("error creating temp copy of configfile %s for variable expansion: %v", configFile, err)
		}
		defer tmpfile.Close()

		originalcontent, err := os.ReadFile(configFile)
		if err != nil {
			l.Log().Fatalf("error reading config file %s: %v", configFile, err)
		}
		// expand $VAR / ${VAR} references against the current process environment
		expandedcontent := os.ExpandEnv(string(originalcontent))
		if _, err := tmpfile.WriteString(expandedcontent); err != nil {
			l.Log().Fatalf("error writing expanded config file contents to temp file %s: %v", tmpfile.Name(), err)
		}

		// use temp file with expanded variables
		cfgViper.SetConfigFile(tmpfile.Name())

		// try to read config into memory (viper map structure)
		if err := cfgViper.ReadInConfig(); err != nil {
			if _, ok := err.(viper.ConfigFileNotFoundError); ok {
				l.Log().Fatalf("Config file %s not found: %+v", configFile, err)
			}
			// config file found but some other error happened
			l.Log().Fatalf("Failed to read config file %s: %+v", configFile, err)
		}

		// pick the JSON schema that matches the file's declared apiVersion
		schema, err := config.GetSchemaByVersion(cfgViper.GetString("apiVersion"))
		if err != nil {
			l.Log().Fatalf("Cannot validate config file %s: %+v", configFile, err)
		}

		// validate the env-expanded temp copy (not the original) against the schema
		if err := config.ValidateSchemaFile(tmpfile.Name(), schema); err != nil {
			l.Log().Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
		}

		l.Log().Infof("Using config file %s (%s#%s)", configFile, strings.ToLower(cfgViper.GetString("apiVersion")), strings.ToLower(cfgViper.GetString("kind")))
	}
	// dump the merged settings when debug logging is enabled
	if l.Log().GetLevel() >= logrus.DebugLevel {
		c, _ := yaml.Marshal(cfgViper.AllSettings())
		l.Log().Debugf("Configuration:\n%s", c)
	}
	return nil
}
|
@ -1,79 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
)
|
||||
|
||||
// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one.
// Input shape: VALUE[@NODEFILTER[;NODEFILTER...]], where a literal '@' can be
// escaped as '\@' and a literal backslash before an '@' as '\\@'.
// Returns (value, filters, error); filters is nil when no '@' is present.
func SplitFiltersFromFlag(flag string) (string, []string, error) {

	/* Case 1) no filter specified */

	if !strings.Contains(flag, "@") {
		return flag, nil, nil
	}

	/* Case 2) filter indicated using '@' in flag */

	split := strings.Split(flag, "@")
	newsplit := []string{}
	buffer := "" // accumulates pieces whose trailing '@' turned out to be escaped

	for i, it := range split {

		// Case 1: There's a '\' just before the '@' sign -> Should it be escaped (aka be a literal '@')?
		// (the last piece can't precede an '@', hence the i != len(split)-1 guard)
		if strings.HasSuffix(it, "\\") && i != len(split)-1 {
			// Case 1.1: Escaped backslash
			if strings.HasSuffix(it, "\\\\") {
				it = strings.TrimSuffix(it, "\\")
				l.Log().Warnf("The part '%s' of the flag input '%s' ends with a double backslash, so we assume you want to escape the backslash before the '@'. That's the only time we do this.", it, flag)
			} else {
				// Case 1.2: Unescaped backslash -> Escaping the '@' -> remove suffix and append it to buffer, followed by the escaped @ sign
				l.Log().Tracef("Item '%s' just before an '@' ends with '\\', so we assume it's escaping a literal '@'", it)
				buffer += strings.TrimSuffix(it, "\\") + "@"
				continue
			}
		}
		// Case 2: There's no '\': append item to buffer, save it to new slice, empty buffer and continue
		newsplit = append(newsplit, buffer+it)
		buffer = ""
		continue
	}

	// max number of pieces after split = 2 (only one @ allowed in flag)
	if len(newsplit) > 2 {
		return "", nil, fmt.Errorf("Invalid flag '%s': only one unescaped '@' allowed for node filter(s) (Escape literal '@' with '\\')", flag)
	}

	// trailing or leading '@'
	if len(newsplit) < 2 {
		return "", nil, fmt.Errorf("Invalid flag '%s' includes unescaped '@' but is missing a node filter (Escape literal '@' with '\\')", flag)
	}

	// multiple node filters are ';'-separated in the part after the '@'
	return newsplit[0], strings.Split(newsplit[1], ";"), nil

}
|
@ -1,89 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type NodePrinter interface {
|
||||
Print(*tabwriter.Writer, *k3d.Node)
|
||||
}
|
||||
|
||||
type NodePrinterFunc func(*tabwriter.Writer, *k3d.Node)
|
||||
|
||||
func (npf NodePrinterFunc) Print(writter *tabwriter.Writer, node *k3d.Node) {
|
||||
npf(writter, node)
|
||||
}
|
||||
|
||||
// PrintNodes prints a list of nodes, either as a table or as a JSON/YAML listing
|
||||
func PrintNodes(nodes []*k3d.Node, outputFormat string, headers *[]string, nodePrinter NodePrinter) {
|
||||
outputFormat = strings.ToLower(outputFormat)
|
||||
|
||||
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
|
||||
defer tabwriter.Flush()
|
||||
|
||||
if outputFormat != "json" && outputFormat != "yaml" {
|
||||
if headers != nil {
|
||||
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(*headers, "\t"))
|
||||
if err != nil {
|
||||
l.Log().Fatalln("Failed to print headers")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
return nodes[i].Name < nodes[j].Name
|
||||
})
|
||||
|
||||
if outputFormat == "json" || outputFormat == "yaml" {
|
||||
var b []byte
|
||||
var err error
|
||||
|
||||
switch outputFormat {
|
||||
case "json":
|
||||
b, err = json.Marshal(nodes)
|
||||
case "yaml":
|
||||
b, err = yaml.Marshal(nodes)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
} else {
|
||||
for _, node := range nodes {
|
||||
if !(outputFormat == "json" || outputFormat == "yaml") {
|
||||
nodePrinter.Print(tabwriter, node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,84 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
)
|
||||
|
||||
// HandlePlugin takes care of finding and executing a plugin based on the longest prefix.
// Returns (true, runError) when a plugin was found and executed, (false, nil) otherwise.
func HandlePlugin(ctx context.Context, args []string) (bool, error) {
	argsPrefix := []string{}

	// build the candidate plugin name from the non-flag args
	for _, arg := range args {
		if strings.HasPrefix(arg, "-") {
			continue // drop flags
		}
		argsPrefix = append(argsPrefix, strings.ReplaceAll(arg, "-", "_")) // plugin executables assumed to have underscores
	}

	execPath := ""

	// longest-prefix match: drop trailing words until an executable is found
	for len(argsPrefix) > 0 {
		path, found := FindPlugin(strings.Join(argsPrefix, "-"))

		if !found {
			argsPrefix = argsPrefix[:len(argsPrefix)-1] // drop last element
			continue
		}

		execPath = path
		break
	}

	if execPath == "" {
		return false, nil
	}

	// NOTE(review): the remaining args are sliced by len(argsPrefix), but flags
	// were filtered out of argsPrefix above — if flags appear before or among
	// the matched words, this slice may misalign with the original args.
	// Confirm the intended behavior before changing.
	return true, ExecPlugin(ctx, execPath, args[len(argsPrefix):], os.Environ())

}
|
||||
|
||||
// FindPlugin tries to find the plugin executable on the filesystem
|
||||
func FindPlugin(name string) (string, bool) {
|
||||
path, err := exec.LookPath(fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, name))
|
||||
if err == nil && len(path) > 0 {
|
||||
return path, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// ExecPlugin executes a found plugin
|
||||
func ExecPlugin(ctx context.Context, path string, args []string, env []string) error {
|
||||
cmd := exec.CommandContext(ctx, path, args...)
|
||||
cmd.Env = env
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stdin = os.Stdin
|
||||
return cmd.Run()
|
||||
}
|
@ -1,125 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
k3d "github.com/rancher/k3d/v5/pkg/types"
|
||||
"github.com/rancher/k3d/v5/pkg/util"
|
||||
)
|
||||
|
||||
var apiPortRegexp = regexp.MustCompile(`^(?P<hostref>(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|(?P<hostname>\S+):)?(?P<port>(\d{1,5}|random))$`)
|
||||
|
||||
// ParsePortExposureSpec parses/validates a string to create an exposePort struct from it.
// Accepted shapes (per apiPortRegexp): [(HostIP|HostName):]HostPort, where
// HostPort may be a number or the literal "random".
func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureOpts, error) {

	match := apiPortRegexp.FindStringSubmatch(exposedPortSpec)

	if len(match) == 0 {
		return nil, fmt.Errorf("Failed to parse Port Exposure specification '%s': Format must be [(HostIP|HostName):]HostPort", exposedPortSpec)
	}

	// map the regexp's named groups (hostip / hostname / port) to their matches
	submatches := util.MapSubexpNames(apiPortRegexp.SubexpNames(), match)

	// no port specified (or not matched via regex)
	if submatches["port"] == "" {
		return nil, fmt.Errorf("Failed to find port in Port Exposure spec '%s'", exposedPortSpec)
	}

	api := &k3d.ExposureOpts{}

	// check if there's a host reference
	if submatches["hostname"] != "" {
		l.Log().Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
		addrs, err := net.LookupHost(submatches["hostname"])
		if err != nil {
			return nil, fmt.Errorf("Failed to lookup host '%s' specified for Port Exposure: %+v", submatches["hostname"], err)
		}
		api.Host = submatches["hostname"]
		submatches["hostip"] = addrs[0] // set hostip to the resolved address
	}

	realPortString := ""

	// fall back to the default host address when none was given
	if submatches["hostip"] == "" {
		submatches["hostip"] = k3d.DefaultAPIHost
	}

	// start with the IP, if there is any
	if submatches["hostip"] != "" {
		realPortString += submatches["hostip"] + ":"
	}

	// port: get a free one if there's none defined or set to random
	// NOTE(review): the empty-port case already returned above, so in practice
	// only "random" can reach this branch — confirm before simplifying.
	if submatches["port"] == "" || submatches["port"] == "random" {
		l.Log().Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
		freePort, err := GetFreePort()
		if err != nil || freePort == 0 {
			l.Log().Warnf("Failed to get random free port: %+v", err)
			l.Log().Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
			submatches["port"] = internalPort
		} else {
			submatches["port"] = strconv.Itoa(freePort)
			l.Log().Debugf("Got free port for Port Exposure: '%d'", freePort)
		}
	}

	// final docker-style spec: [hostip:]hostport:internalport/tcp
	realPortString += fmt.Sprintf("%s:%s/tcp", submatches["port"], internalPort)

	portMapping, err := nat.ParsePortSpec(realPortString)
	if err != nil {
		return nil, fmt.Errorf("failed to parse port spec for Port Exposure '%s': %+v", realPortString, err)
	}

	api.Port = portMapping[0].Port // there can be only one due to our regexp
	api.Binding = portMapping[0].Binding

	return api, nil

}
|
||||
|
||||
// ValidatePortMap validates a port mapping.
// Currently a pass-through: it returns the input unchanged and never errors.
func ValidatePortMap(portmap string) (string, error) {
	// TODO: ValidatePortMap: add validation of port mapping
	return portmap, nil
}
|
||||
|
||||
// GetFreePort tries to fetch an open port from the OS-Kernel by binding a
// TCP listener on localhost:0 and reading back the assigned port.
// Note: the port is released again before returning, so a tiny race with
// other processes grabbing it is possible.
func GetFreePort() (int, error) {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		return 0, fmt.Errorf("failed to resolve address 'localhost:0': %w", err)
	}

	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return 0, fmt.Errorf("failed to create tcp listener: %w", err)
	}
	defer listener.Close()

	return listener.Addr().(*net.TCPAddr).Port, nil
}
|
@ -1,35 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
)
|
||||
|
||||
// validateRuntimeLabelKey validates a given label key is not reserved for internal k3d usage
|
||||
func ValidateRuntimeLabelKey(labelKey string) {
|
||||
if strings.HasPrefix(labelKey, "k3s.") || strings.HasPrefix(labelKey, "k3d.") || labelKey == "app" {
|
||||
l.Log().Fatalf("runtime label \"%s\" is reserved for internal usage", labelKey)
|
||||
}
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import "strings"
|
||||
|
||||
// SplitKV splits an '='-delimited string into a key-value-pair (if any).
// Only the first '=' is significant (like `docker run` does); without one,
// the whole string is the key and the value is empty.
func SplitKV(kvstring string) (string, string) {
	parts := strings.SplitN(kvstring, "=", 2)
	if len(parts) == 2 {
		return parts[0], parts[1]
	}
	// defaults to key with empty value (like `docker run` does)
	return kvstring, ""
}
|
@ -1,107 +0,0 @@
|
||||
/*
|
||||
Copyright © 2020-2021 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
rt "runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3d/v5/pkg/runtimes"
|
||||
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
)
|
||||
|
||||
// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
|
||||
// - SRC: source directory/file -> tests: must exist
|
||||
// - DEST: source directory/file -> tests: must be absolute path
|
||||
// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
|
||||
// - SRC: source directory/file -> tests: must exist
|
||||
// - DEST: source directory/file -> tests: must be absolute path
|
||||
func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string, error) {
|
||||
src := ""
|
||||
dest := ""
|
||||
|
||||
// validate 'SRC[:DEST]' substring
|
||||
split := strings.Split(volumeMount, ":")
|
||||
// a volume mapping can have 3 parts seperated by a ':' followed by a node filter
|
||||
// [SOURCE:]DEST[:OPT[,OPT]][@NODEFILTER[;NODEFILTER...]]
|
||||
// On Windows the source path needs to be an absolute path which means the path starts with
|
||||
// a drive designator and will also have a ':' in it. So for Windows the maxParts is increased by one.
|
||||
maxParts := 3
|
||||
if rt.GOOS == "windows" {
|
||||
maxParts++
|
||||
}
|
||||
if len(split) < 1 {
|
||||
return "", fmt.Errorf("No volume/path specified")
|
||||
}
|
||||
if len(split) > maxParts {
|
||||
return "", fmt.Errorf("Invalid volume mount '%s': maximal %d ':' allowed", volumeMount, maxParts-1)
|
||||
}
|
||||
|
||||
// we only have SRC specified -> DEST = SRC
|
||||
// On windows the first part of the SRC is the drive letter, so we need to concat the first and second parts to get the path.
|
||||
if len(split) == 1 {
|
||||
src = split[0]
|
||||
dest = src
|
||||
} else if rt.GOOS == "windows" {
|
||||
src = split[0] + ":" + split[1]
|
||||
dest = split[2]
|
||||
} else {
|
||||
src = split[0]
|
||||
dest = split[1]
|
||||
}
|
||||
|
||||
// verify that the source exists
|
||||
if src != "" {
|
||||
// a) named volume
|
||||
isNamedVolume := true
|
||||
if err := verifyNamedVolume(runtime, src); err != nil {
|
||||
isNamedVolume = false
|
||||
}
|
||||
if !isNamedVolume {
|
||||
if _, err := os.Stat(src); err != nil {
|
||||
l.Log().Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// verify that the destination is an absolute path
|
||||
if !strings.HasPrefix(dest, "/") {
|
||||
return "", fmt.Errorf("Volume mount destination doesn't appear to be an absolute path: '%s' in '%s'", dest, volumeMount)
|
||||
}
|
||||
|
||||
return volumeMount, nil
|
||||
}
|
||||
|
||||
// verifyNamedVolume checks whether a named volume exists in the runtime
|
||||
func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
|
||||
volumeName, err := runtime.GetVolume(volumeName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to verify named volume: %w", err)
|
||||
}
|
||||
if volumeName == "" {
|
||||
return fmt.Errorf("Failed to find named volume '%s'", volumeName)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,54 +0,0 @@
|
||||
#!/bin/bash
# Publishes an updated AUR package for a new k3d release.
# Expects these variables in the (exported) environment:
#   PACKAGE_NAME, COMMIT_REF, COMMIT_MESSAGE, SSH_PRIVATE_KEY (base64),
#   COMMIT_USERNAME, COMMIT_EMAIL
# -- TODO confirm against the CI job that exports them.

set -e

# Setup base system
pacman -Syu --noconfirm openssh git gettext binutils
# restrict makepkg integrity checks to sha256 (matches the sums generated below)
sed -i "s/INTEGRITY_CHECK=.*$/INTEGRITY_CHECK=(sha256)/" /etc/makepkg.conf
useradd -ms /bin/bash aur
# -m preserves the current (exported) environment for the aur user's shell;
# the quoted heredoc delimiter defers all variable expansion to that shell
su -m aur <<'EOSU'

set -e

# Configuration
export HOME=/home/aur
export REPO_URL="ssh://aur@aur.archlinux.org/$PACKAGE_NAME.git"
# strip everything up to and including the last '/v' to get the bare version
export NEW_RELEASE="${COMMIT_REF##*/v}"
export COMMIT_MESSAGE="$(echo $COMMIT_MESSAGE | envsubst)"
echo "---------------- AUR Package version $PACKAGE_NAME/$NEW_RELEASE ----------------"

# SSH & GIT Setup
mkdir "$HOME/.ssh" && chmod 700 "$HOME/.ssh"
ssh-keyscan -t ed25519 aur.archlinux.org >> "$HOME/.ssh/known_hosts"
# SSH_PRIVATE_KEY is stored base64-encoded; decode into the default key path
echo -e "$SSH_PRIVATE_KEY" | base64 -d > "$HOME/.ssh/id_rsa"
chmod 600 "$HOME/.ssh/id_rsa"
git config --global user.name "$COMMIT_USERNAME"
git config --global user.email "$COMMIT_EMAIL"

# Clone AUR Package
cd /tmp
echo "$REPO_URL"
git clone "$REPO_URL"
cd "$PACKAGE_NAME"

# Generate a dummy PKGBUILD so we can grab the latest releases SHA256SUMS
cat PKGBUILD.template | envsubst '$NEW_RELEASE' > PKGBUILD

# makepkg -g downloads the sources for each arch and prints their checksums
export SHA256_SUMS_x86_64="$(CARCH=x86_64 makepkg -g 2> /dev/null)"
echo "SHA256_SUMS_x86_64: $SHA256_SUMS_x86_64"

export SHA256_SUMS_aarch64="$(CARCH=aarch64 makepkg -g 2> /dev/null)"
echo "SHA256_SUMS_aarch64: $SHA256_SUMS_aarch64"

export SHA256_SUMS_arm="$(CARCH=arm makepkg -g 2> /dev/null)"
echo "SHA256_SUMS_arm: $SHA256_SUMS_arm"

# Render the final PKGBUILD with version + per-arch checksums, regenerate .SRCINFO
cat PKGBUILD.template | envsubst '$NEW_RELEASE$SHA256_SUMS_x86_64$SHA256_SUMS_aarch64$SHA256_SUMS_arm' > PKGBUILD
makepkg --printsrcinfo > .SRCINFO

echo "------------- BUILD DONE ----------------"
git add PKGBUILD .SRCINFO
git commit -m "$COMMIT_MESSAGE"
git push
echo "------------- PUBLISH DONE ----------------"
EOSU
@ -1,27 +0,0 @@
|
||||
image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}-dind
|
||||
{{#if build.tags}}
|
||||
tags:
|
||||
{{#each build.tags}}
|
||||
- {{this}}
|
||||
{{/each}}
|
||||
{{/if}}
|
||||
manifests:
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-amd64
|
||||
platform:
|
||||
architecture: amd64
|
||||
os: linux
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm64
|
||||
platform:
|
||||
variant: v8
|
||||
architecture: arm64
|
||||
os: linux
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
|
||||
platform:
|
||||
variant: v7
|
||||
architecture: arm
|
||||
os: linux
|
||||
- image: rancher/k3d:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}dind-linux-arm
|
||||
platform:
|
||||
variant: v6
|
||||
architecture: arm
|
||||
os: linux
|
@ -1,15 +0,0 @@
|
||||
# docgen
|
||||
|
||||
Only used to generate the command tree for <https://k3d.io/usage/commands>.
|
||||
|
||||
The code will output files in [`../docs/usage/commands/`](../docs/usage/commands/)
|
||||
|
||||
## Run
|
||||
|
||||
```bash
|
||||
# ensure that you're in the docgen dir, as the relative path to the docs/ dir is hardcoded
|
||||
cd docgen
|
||||
|
||||
# run
|
||||
./run.sh
|
||||
```
|
@ -1,13 +0,0 @@
|
||||
module github.com/rancher/k3d/docgen
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/rancher/k3d/v5 v5.0.0-00010101000000-000000000000
|
||||
github.com/spf13/cobra v1.2.1
|
||||
golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 // indirect
|
||||
golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 // indirect
|
||||
)
|
||||
|
||||
replace github.com/rancher/k3d/v5 => /PATH/TO/YOUR/REPO/DIRECTORY
|
1447
docgen/go.sum
1447
docgen/go.sum
File diff suppressed because it is too large
Load Diff
@ -1,16 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/rancher/k3d/v5/cmd"
|
||||
l "github.com/rancher/k3d/v5/pkg/logger"
|
||||
"github.com/spf13/cobra/doc"
|
||||
)
|
||||
|
||||
func main() {
|
||||
k3d := cmd.NewCmdK3d()
|
||||
k3d.DisableAutoGenTag = true
|
||||
|
||||
if err := doc.GenMarkdownTree(k3d, "../docs/usage/commands"); err != nil {
|
||||
l.Log().Fatalln(err)
|
||||
}
|
||||
}
|
@ -1,22 +0,0 @@
|
||||
#!/bin/bash

# Generate the k3d CLI command docs via docgen.
#
# go.mod ships with a placeholder `replace` target that must temporarily
# point at the real repository checkout for the build to work; this script
# swaps the real path in, runs the generator, and swaps the placeholder back.

PLACEHOLDER="/PATH/TO/YOUR/REPO/DIRECTORY"

# Resolve the directory this script lives in. BASH_SOURCE is bash-specific,
# so this fails (by design) under other shells such as zsh.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
[ -d "$SCRIPT_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; }

# The repository root is the parent of the docgen directory.
ROOT_DIR=${SCRIPT_DIR%"/docgen"}

echo "$ROOT_DIR"

# Point the go.mod `replace` directive at the actual repo checkout.
sed -i "s%$PLACEHOLDER%$ROOT_DIR%" "$SCRIPT_DIR/go.mod"

go mod tidy

go mod vendor

# Run the docs generator (writes into ../docs/usage/commands).
go run ./main.go

# Restore the placeholder so go.mod stays unchanged in version control.
sed -i "s%$ROOT_DIR%$PLACEHOLDER%" "$SCRIPT_DIR/go.mod"

rm -r "$SCRIPT_DIR/vendor"
|
@ -1,6 +0,0 @@
|
||||
nav:
|
||||
- index.md
|
||||
- usage
|
||||
- design
|
||||
- faq
|
||||
collapse: false
|
@ -1,5 +0,0 @@
|
||||
title: Design
|
||||
nav:
|
||||
- project.md
|
||||
- defaults.md
|
||||
- networking.md
|
@ -1,60 +0,0 @@
|
||||
# Defaults
|
||||
|
||||
## k3d reserved settings
|
||||
|
||||
When you create a K3s cluster in Docker using k3d, we make use of some K3s configuration options, making them "reserved" for k3d.
|
||||
This means that overriding those options with your own may break the cluster setup.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
The following K3s environment variables are used to configure the cluster:
|
||||
|
||||
| Variable | K3d Default | Configurable? |
|
||||
|----------|-------------|---------------|
|
||||
| `K3S_URL`| `https://$CLUSTERNAME-server-0:6443` | no |
|
||||
| `K3S_TOKEN`| random | yes (`--token`) |
|
||||
| `K3S_KUBECONFIG_OUTPUT`| `/output/kubeconfig.yaml` | no |
|
||||
|
||||
## k3d Loadbalancer
|
||||
|
||||
By default, k3d creates an Nginx loadbalancer alongside the clusters it creates to handle the port-forwarding.
|
||||
The loadbalancer can partly be configured using k3d-defined settings.
|
||||
|
||||
| Nginx setting | k3d default | k3d setting |
|
||||
|-------------|-------------|-------------|
|
||||
| `proxy_timeout` (default for all server stanzas) | `600` (s) | `settings.defaultProxyTimeout` |
|
||||
| `worker_connections` | `1024` | `settings.workerConnections` |
|
||||
|
||||
### Overrides
|
||||
|
||||
- Example via CLI: `k3d cluster create --lb-config-override settings.defaultProxyTimeout=900`
|
||||
- Example via Config File:
|
||||
|
||||
```yaml
|
||||
# ... truncated ...
|
||||
k3d:
|
||||
loadbalancer:
|
||||
configOverrides:
|
||||
- settings.workerConnections=2048
|
||||
```
|
||||
|
||||
## Multiple server nodes
|
||||
|
||||
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
|
||||
- the initializing server node will have the `--cluster-init` flag appended
|
||||
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
|
||||
|
||||
## API-Ports
|
||||
|
||||
- by default, we expose the API-Port (`6443`) by forwarding traffic from the default server loadbalancer (nginx container) to the server node(s)
|
||||
- port `6443` of the loadbalancer is then mapped to a specific (`--api-port` flag) or a random (default) port on the host system
|
||||
|
||||
## Kubeconfig
|
||||
|
||||
- if `--kubeconfig-update-default` is set, we use the default loading rules to get the default kubeconfig:
|
||||
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
|
||||
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
|
||||
|
||||
## Networking
|
||||
|
||||
- [by default, k3d creates a new (docker) network for every cluster](./networking)
|
@ -1,27 +0,0 @@
|
||||
# Networking
|
||||
|
||||
- Related issues:
|
||||
- [rancher/k3d #220](https://github.com/rancher/k3d/issues/220)
|
||||
|
||||
## Introduction
|
||||
|
||||
By default, k3d creates a new (docker) network for every new cluster.
|
||||
Use the `--network STRING` flag upon creation to connect to an existing network.
|
||||
Existing networks won't be managed by k3d together with the cluster lifecycle.
|
||||
|
||||
## Connecting to docker "internal"/pre-defined networks
|
||||
|
||||
### `host` network
|
||||
|
||||
When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`), you won't be able to create more than **one server node**.
|
||||
An edge case would be one server node (with agent disabled) and one agent node.
|
||||
|
||||
### `bridge` network
|
||||
|
||||
By default, every network that k3d creates is working in `bridge` mode.
|
||||
But when you try to use `--network bridge` to connect to docker's internal `bridge` network, you may run into issues with grabbing certificates from the API-Server.
|
||||
Single-Node clusters should work though.
|
||||
|
||||
### `none` "network"
|
||||
|
||||
Well.. this doesn't really make sense for k3d anyway ¯\\\_(ツ)\_/¯
|
@ -1,109 +0,0 @@
|
||||
# Project Overview
|
||||
|
||||
## About This Page
|
||||
|
||||
On this page we'll try to give an overview of all the moving bits and pieces in k3d to ease contributions to the project.
|
||||
|
||||
## Directory Overview
|
||||
|
||||
- [`.github/`](https://github.com/rancher/k3d/tree/main/.github)
|
||||
- templates for issues and pull requests
|
||||
- GitHub Action workflow definitions
|
||||
- [`cmd/`](https://github.com/rancher/k3d/tree/main/cmd)
|
||||
- everything related to the actual k3d CLI, like the whole command tree, config initialization, argument parsing, etc.
|
||||
- [`docgen/`](https://github.com/rancher/k3d/tree/main/docgen)
|
||||
- sub-module used to auto-generate the documentation for the CLI commands, which ends up in [`docs/usage/commands/`](https://github.com/rancher/k3d/tree/main/docs/usage/commands)
|
||||
- [`docs/`](https://github.com/rancher/k3d/tree/main/docs)
|
||||
- all the resources used to build [k3d.io](https://k3d.io) using mkdocs
|
||||
- [`pkg/`](<https://github.com/rancher/k3d/tree/main/pkg>)
|
||||
- the place where the magic happens.. here you find all the main logic of k3d
|
||||
- all function calls within [`cmd/`](https://github.com/rancher/k3d/tree/main/cmd) that do non-trivial things are imported from here
|
||||
- this (or rather sub-packages) is what other projects would import as a module to work with k3d without using the CLI
|
||||
- [`proxy/`](https://github.com/rancher/k3d/tree/main/proxy)
|
||||
- configuration to build the [`rancher/k3d-proxy`](https://hub.docker.com/r/rancher/k3d-proxy/) container image which is used as a loadbalancer/proxy in front of (almost) every k3d cluster
|
||||
- this is basically just a combination of NGINX with confd and some k3d-specific configuration details
|
||||
- [`tests/`](https://github.com/rancher/k3d/tree/main/tests)
|
||||
- a set of bash scripts used for end-to-end (E2E) tests of k3d
|
||||
- mostly used for all the functionality of the k3d CLI which cannot be tested using Go unit tests
|
||||
- [`tools/`](https://github.com/rancher/k3d/tree/main/tools)
|
||||
- sub-module used to build the [`rancher/k3d-tools`](https://hub.docker.com/r/rancher/k3d-tools) container image which supports some k3d functionality like `k3d image import`
|
||||
- [`vendor/`](https://github.com/rancher/k3d/tree/main/vendor)
|
||||
- result of `go mod vendor`, which contains all dependencies of k3d
|
||||
- [`version/`](https://github.com/rancher/k3d/tree/main/version)
|
||||
- package used to code k3d/k3s versions into releases
|
||||
- this is where `go build` injects the version tags when building k3d
|
||||
- that's the output you see when issuing `k3d version`
|
||||
|
||||
## Packages Overview
|
||||
|
||||
- [`pkg/`](https://github.com/rancher/k3d/tree/main/pkg)
|
||||
- [`actions/`](https://github.com/rancher/k3d/tree/main/pkg/actions)
|
||||
- hook actions describing actions (commands, etc.) that run at specific stages of the node/cluster lifecycle
|
||||
- e.g. writing configuration files to the container filesystem just before the node (container) starts
|
||||
- [`client/`](https://github.com/rancher/k3d/tree/main/pkg/client)
|
||||
- all the top level functionality to work with k3d primitives
|
||||
- create/retrieve/update/delete/start/stop clusters, nodes, registries, etc. managed by k3d
|
||||
- [`config/`](https://github.com/rancher/k3d/tree/main/pkg/config)
|
||||
- everything related to the k3d configuration (files), like `SimpleConfig` and `ClusterConfig`
|
||||
- [`runtimes/`](https://github.com/rancher/k3d/tree/main/pkg/runtimes)
|
||||
- interface and implementations of runtimes that power k3d (currently, that's only Docker)
|
||||
- functions in [`client/`](https://github.com/rancher/k3d/tree/main/pkg/client) eventually call runtime functions to "materialize" nodes and clusters
|
||||
- [`tools/`](https://github.com/rancher/k3d/tree/main/pkg/tools)
|
||||
- functions eventually calling the [`k3d-tools`](https://hub.docker.com/r/rancher/k3d-tools) container (see [`tools/`](https://github.com/rancher/k3d/tree/main/tools) in the repo root)
|
||||
- [`types/`](https://github.com/rancher/k3d/tree/main/pkg/types)
|
||||
- definition of all k3d primitives and many other details and defaults
|
||||
- e.g. contains the definition of a `Node` or a `Cluster` in k3d
|
||||
- [`util/`](https://github.com/rancher/k3d/tree/main/pkg/util)
|
||||
- some helper functions e.g. for string manipulation/generation, regexp or other re-usable usages
|
||||
|
||||
## Anatomy of a Cluster
|
||||
|
||||
By default, every k3d cluster consists of at least 2 containers (nodes):
|
||||
|
||||
1. (optional, but default and strongly recommended) loadbalancer
|
||||
|
||||
- image: [`rancher/k3d-proxy`](https://hub.docker.com/r/rancher/k3d-proxy/), built from [`proxy/`](https://github.com/rancher/k3d/tree/main/proxy)
|
||||
- purpose: proxy and load balance requests from the outside (i.e. most of the times your local host) to the cluster
|
||||
- by default, it e.g. proxies all the traffic for the Kubernetes API to port `6443` (default listening port of K3s) to all the server nodes in the cluster
|
||||
- can be used for multiple port-mappings to one or more nodes in your cluster
|
||||
- that way, port-mappings can also easily be added/removed after the cluster creation, as we can simply re-create the proxy without affecting cluster state
|
||||
|
||||
2. (required, always present) primary server node
|
||||
|
||||
- image: [`rancher/k3s`](https://hub.docker.com/r/rancher/k3s/), built from [`github.com/k3s-io/k3s`](https://github.com/k3s-io/k3s)
|
||||
- purpose: (initializing) server (formerly: master) node of the cluster
|
||||
- runs the K3s executable (which runs containerd, the Kubernetes API Server, etcd/sqlite, etc.): `k3s server`
|
||||
- in a multi-server setup, it initializes the cluster with an embedded etcd database (using the K3s `--cluster-init` flag)
|
||||
|
||||
3. (optional) secondary server node(s)
|
||||
|
||||
- image: [`rancher/k3s`](https://hub.docker.com/r/rancher/k3s/), built from [`github.com/k3s-io/k3s`](https://github.com/k3s-io/k3s)
|
||||
|
||||
4. (optional) agent node(s)
|
||||
|
||||
- image: [`rancher/k3s`](https://hub.docker.com/r/rancher/k3s/), built from [`github.com/k3s-io/k3s`](https://github.com/k3s-io/k3s)
|
||||
- purpose: running the K3s agent process (kubelet, etc.): `k3s agent`
|
||||
|
||||
## Automation (CI)
|
||||
|
||||
The k3d repository mainly leverages the following two CI systems:
|
||||
|
||||
- GitHub Actions
|
||||
- 2 workflows in <https://github.com/rancher/k3d/tree/main/.github/workflows> to push the artifact to AUR (Arch Linux User Repository)
|
||||
- logs/history can be seen in the Actions tab: <https://github.com/rancher/k3d/actions>
|
||||
- DroneCI
|
||||
- a set of pipelines in a single file: <https://github.com/rancher/k3d/blob/main/.drone.yml>
|
||||
- static code analysis
|
||||
- build
|
||||
- tests
|
||||
- docker builds + pushes
|
||||
- render + push docs
|
||||
- (pre-) release to GitHub
|
||||
- `push` events end up here (also does the releases, when a tag is pushed): <https://drone-publish.rancher.io/rancher/k3d>
|
||||
- `pr`s end up here: <https://drone-pr.rancher.io/rancher/k3d>
|
||||
|
||||
## Documentation
|
||||
|
||||
The website [k3d.io](https://k3d.io) containing all the documentation for k3d is built using [`mkdocs`](https://www.mkdocs.org/), configured via the [`mkdocs.yml`](https://github.com/rancher/k3d/blob/main/mkdocs.yml) config file with all the content residing in the [`docs/`](https://github.com/rancher/k3d/tree/main/docs) directory (Markdown).
|
||||
Use `mkdocs serve` in the repository root to build and serve the webpage locally.
|
||||
Some parts of the documentation are being auto-generated, like [`docs/usage/commands/`](https://github.com/rancher/k3d/tree/main/docs/usage/commands) is auto-generated using Cobra's command docs generation functionality in [`docgen/`](https://github.com/rancher/k3d/tree/main/docgen).
|
5
docs/documentation.md
Normal file
5
docs/documentation.md
Normal file
@ -0,0 +1,5 @@
|
||||
# Documentation
|
||||
|
||||
## Functionality
|
||||
|
||||
... under construction ...
|
125
docs/examples.md
Normal file
125
docs/examples.md
Normal file
@ -0,0 +1,125 @@
|
||||
# Examples
|
||||
|
||||
## Expose services
|
||||
|
||||
### 1. via Ingress
|
||||
|
||||
In this example, we will deploy a simple nginx webserver deployment and make it accessible via ingress.
|
||||
Therefore, we have to create the cluster in a way that the internal port 80 (where the `traefik` ingress controller is listening on) is exposed on the host system.
|
||||
|
||||
1. Create a cluster, mapping the ingress port 80 to localhost:8081
|
||||
|
||||
`k3d create --api-port 6550 --publish 8081:80 --workers 2`
|
||||
|
||||
- Note: `--api-port 6550` is not required for the example to work. It's used to have `k3s`'s API-Server listening on port 6550 with that port mapped to the host system.
|
||||
|
||||
2. Get the kubeconfig file
|
||||
|
||||
`export KUBECONFIG="$(k3d get-kubeconfig --name='k3s-default')"`
|
||||
|
||||
3. Create a nginx deployment
|
||||
|
||||
`kubectl create deployment nginx --image=nginx`
|
||||
|
||||
4. Create a ClusterIP service for it
|
||||
|
||||
`kubectl create service clusterip nginx --tcp=80:80`
|
||||
|
||||
5. Create an ingress object for it with `kubectl apply -f`
|
||||
*Note*: `k3s` deploys [`traefik`](https://github.com/containous/traefik) as the default ingress controller
|
||||
|
||||
```YAML
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginx
|
||||
annotations:
|
||||
ingress.kubernetes.io/ssl-redirect: "false"
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: nginx
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
6. Curl it via localhost
|
||||
|
||||
`curl localhost:8081/`
|
||||
|
||||
### 2. via NodePort
|
||||
|
||||
1. Create a cluster, mapping the port 30080 from worker-0 to localhost:8082
|
||||
|
||||
`k3d create --publish 8082:30080@k3d-k3s-default-worker-0 --workers 2`
|
||||
|
||||
- Note: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport)
|
||||
|
||||
... (Steps 2 and 3 like above) ...
|
||||
|
||||
1. Create a NodePort service for it with `kubectl apply -f`
|
||||
|
||||
```YAML
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
name: nginx
|
||||
spec:
|
||||
ports:
|
||||
- name: 80-80
|
||||
nodePort: 30080
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: nginx
|
||||
type: NodePort
|
||||
```
|
||||
|
||||
2. Curl it via localhost
|
||||
|
||||
`curl localhost:8082/`
|
||||
|
||||
|
||||
## Running on filesystems k3s doesn't like (btrfs, tmpfs, …)
|
||||
|
||||
The following script leverages a [Docker loopback volume plugin](https://github.com/ashald/docker-volume-loopback) to mask the problematic filesystem away from k3s by providing a small ext4 filesystem underneath `/var/lib/rancher/k3s` (k3s' data dir).
|
||||
|
||||
```bash
|
||||
#!/bin/bash -x
|
||||
|
||||
CLUSTER_NAME="${1:-k3s-default}"
|
||||
NUM_WORKERS="${2:-2}"
|
||||
|
||||
setup() {
|
||||
PLUGIN_LS_OUT=`docker plugin ls --format '{{.Name}},{{.Enabled}}' | grep -E '^ashald/docker-volume-loopback'`
|
||||
[ -z "${PLUGIN_LS_OUT}" ] && docker plugin install ashald/docker-volume-loopback DATA_DIR=/tmp/docker-loop/data
|
||||
sleep 3
|
||||
[ "${PLUGIN_LS_OUT##*,}" != "true" ] && docker plugin enable ashald/docker-volume-loopback
|
||||
|
||||
K3D_MOUNTS=()
|
||||
for i in `seq 0 ${NUM_WORKERS}`; do
|
||||
[ ${i} -eq 0 ] && VOLUME_NAME="k3d-${CLUSTER_NAME}-server" || VOLUME_NAME="k3d-${CLUSTER_NAME}-worker-$((${i}-1))"
|
||||
docker volume create -d ashald/docker-volume-loopback ${VOLUME_NAME} -o sparse=true -o fs=ext4
|
||||
K3D_MOUNTS+=('-v' "${VOLUME_NAME}:/var/lib/rancher/k3s@${VOLUME_NAME}")
|
||||
done
|
||||
k3d c -i rancher/k3s:v0.9.1 -n ${CLUSTER_NAME} -w ${NUM_WORKERS} ${K3D_MOUNTS[@]}
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
K3D_VOLUMES=()
|
||||
k3d d -n ${CLUSTER_NAME}
|
||||
for i in `seq 0 ${NUM_WORKERS}`; do
|
||||
[ ${i} -eq 0 ] && VOLUME_NAME="k3d-${CLUSTER_NAME}-server" || VOLUME_NAME="k3d-${CLUSTER_NAME}-worker-$((${i}-1))"
|
||||
K3D_VOLUMES+=("${VOLUME_NAME}")
|
||||
done
|
||||
docker volume rm -f ${K3D_VOLUMES[@]}
|
||||
}
|
||||
|
||||
setup
|
||||
#cleanup
|
||||
```
|
13
docs/faq.md
Normal file
13
docs/faq.md
Normal file
@ -0,0 +1,13 @@
|
||||
# FAQ / Nice to know
|
||||
|
||||
- As [@jaredallard](https://github.com/jaredallard) [pointed out](https://github.com/rancher/k3d/pull/48), people running `k3d` on a system with **btrfs**, may need to mount `/dev/mapper` into the nodes for the setup to work.
|
||||
- This will do: `k3d create -v /dev/mapper:/dev/mapper`
|
||||
- An additional solution proposed by [@zer0def](https://github.com/zer0def) can be found in the [examples section](examples.md) (_Running on filesystems k3s doesn't like (btrfs, tmpfs, …)_)
|
||||
|
||||
- Pods go to evicted state after doing X
|
||||
- Related issues: [#133 - Pods evicted due to `NodeHasDiskPressure`](https://github.com/rancher/k3d/issues/133) (collection of #119 and #130)
|
||||
- Background: somehow docker runs out of space for the k3d node containers, which triggers a hard eviction in the kubelet
|
||||
- Possible [fix/workaround by @zer0def](https://github.com/rancher/k3d/issues/133#issuecomment-549065666):
|
||||
- use a docker storage driver which cleans up properly (e.g. overlay2)
|
||||
- clean up or expand docker root filesystem
|
||||
- change the kubelet's eviction thresholds upon cluster creation: `k3d create --agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`
|
@ -1,4 +0,0 @@
|
||||
title: FAQ
|
||||
nav:
|
||||
- faq.md
|
||||
collapse: true
|
161
docs/faq/faq.md
161
docs/faq/faq.md
@ -1,161 +0,0 @@
|
||||
# FAQ
|
||||
|
||||
## Issues with BTRFS
|
||||
|
||||
- As [@jaredallard](https://github.com/jaredallard) [pointed out](https://github.com/rancher/k3d/pull/48), people running `k3d` on a system with **btrfs**, may need to mount `/dev/mapper` into the nodes for the setup to work.
|
||||
- This will do: `#!bash k3d cluster create CLUSTER_NAME -v /dev/mapper:/dev/mapper`
|
||||
|
||||
## Issues with ZFS
|
||||
|
||||
- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-server setups (e.g. `#!bash k3d cluster create multiserver --servers 3`) fails, because the initializing server node (server flag `--cluster-init`) errors out with the following log:
|
||||
|
||||
```bash
|
||||
starting kubernetes: preparing server: start cluster and https: raft_init(): io: create I/O capabilities probe file: posix_allocate: operation not supported on socket
|
||||
```
|
||||
|
||||
- This issue can be worked around by providing docker with a different filesystem (that's also better for docker-in-docker stuff).
|
||||
- A possible solution can be found here: [https://github.com/rancher/k3s/issues/1688#issuecomment-619570374](https://github.com/rancher/k3s/issues/1688#issuecomment-619570374)
|
||||
|
||||
## Pods evicted due to lack of disk space
|
||||
|
||||
- Pods go to evicted state after doing X
|
||||
- Related issues: [#133 - Pods evicted due to `NodeHasDiskPressure`](https://github.com/rancher/k3d/issues/133) (collection of #119 and #130)
|
||||
- Background: somehow docker runs out of space for the k3d node containers, which triggers a hard eviction in the kubelet
|
||||
- Possible [fix/workaround by @zer0def](https://github.com/rancher/k3d/issues/133#issuecomment-549065666):
|
||||
- use a docker storage driver which cleans up properly (e.g. overlay2)
|
||||
- clean up or expand docker root filesystem
|
||||
- change the kubelet's eviction thresholds upon cluster creation:
|
||||
|
||||
```bash
|
||||
k3d cluster create \
|
||||
--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
|
||||
--k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*'
|
||||
```
|
||||
|
||||
## Restarting a multi-server cluster or the initializing server node fails
|
||||
|
||||
- What you do: You create a cluster with more than one server node and later, you either stop `server-0` or stop/start the whole cluster
|
||||
- What fails: After the restart, you cannot connect to the cluster anymore and `kubectl` will give you a lot of errors
|
||||
- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing server node to go down
|
||||
- What's the solution: Hopefully, this will be solved by the planned [replacement of dqlite with embedded etcd in k3s](https://github.com/rancher/k3s/pull/1770)
|
||||
- Related issues: [#262](https://github.com/rancher/k3d/issues/262)
|
||||
|
||||
## Passing additional arguments/flags to k3s (and on to e.g. the kube-apiserver)
|
||||
|
||||
- The Problem: Passing a feature flag to the Kubernetes API Server running inside k3s.
|
||||
- Example: you want to enable the EphemeralContainers feature flag in Kubernetes
|
||||
- Solution: `#!bash k3d cluster create --k3s-arg '--kube-apiserver-arg=feature-gates=EphemeralContainers=true@server:*'`
|
||||
- **Note**: Be aware of where the flags require dashes (`--`) and where not.
|
||||
- the k3s flag (`--kube-apiserver-arg`) has the dashes
|
||||
- the kube-apiserver flag `feature-gates` doesn't have them (k3s adds them internally)
|
||||
|
||||
- Second example:
|
||||
|
||||
```bash
|
||||
k3d cluster create k3d-one \
|
||||
--k3s-arg "--cluster-cidr=10.118.0.0/17@server:*" \
|
||||
--k3s-arg "--service-cidr=10.118.128.0/17@server:*" \
|
||||
--k3s-arg "--disable=servicelb@server:*" \
|
||||
--k3s-arg "--disable=traefik@server:*" \
|
||||
--verbose
|
||||
```
|
||||
|
||||
- **Note**: There are many ways to use the `"` and `'` quotes, just be aware, that sometimes shells also try to interpret/interpolate parts of the commands
|
||||
|
||||
## How to access services (like a database) running on my Docker Host Machine
|
||||
|
||||
- As of version v3.1.0, we're injecting the `host.k3d.internal` entry into the k3d containers (k3s nodes) and into the CoreDNS ConfigMap, enabling you to access your host system by referring to it as `host.k3d.internal`
|
||||
|
||||
## Running behind a corporate proxy
|
||||
|
||||
Running k3d behind a corporate proxy can lead to some issues with k3d that have already been reported in more than one issue.
|
||||
Some can be fixed by passing the `HTTP_PROXY` environment variables to k3d, some have to be fixed in docker's `daemon.json` file and some are as easy as adding a volume mount.
|
||||
|
||||
## Pods fail to start: `x509: certificate signed by unknown authority`
|
||||
|
||||
- Example Error Message:
|
||||
|
||||
```bash
|
||||
Failed to create pod sandbox: rpc error: code = Unknown desc = failed to get sandbox image "docker.io/rancher/pause:3.1": failed to pull image "docker.io/rancher/pause:3.1": failed to pull and unpack image "docker.io/rancher/pause:3.1": failed to resolve reference "docker.io/rancher/pause:3.1": failed to do request: Head https://registry-1.docker.io/v2/rancher/pause/manifests/3.1: x509: certificate signed by unknown authority
|
||||
```
|
||||
|
||||
- Problem: inside the container, the certificate of the corporate proxy cannot be validated
|
||||
- Possible Solution: Mounting the CA Certificate from your host into the node containers at start time via `k3d cluster create --volume /path/to/your/certs.crt:/etc/ssl/certs/yourcert.crt`
|
||||
- Issue: [rancher/k3d#535](https://github.com/rancher/k3d/discussions/535#discussioncomment-474982)
|
||||
|
||||
## Spurious PID entries in `/proc` after deleting `k3d` cluster with shared mounts
|
||||
|
||||
- When you perform cluster create and deletion operations multiple times with **same cluster name** and **shared volume mounts**, it was observed that `grep k3d /proc/*/mountinfo` shows many spurious entries
|
||||
- Problem: Due to above, at times you'll see `no space left on device: unknown` when a pod is scheduled to the nodes
|
||||
- If you observe anything of above sort you can check for inaccessible file systems and unmount them by using below command (note: please remove `xargs umount -l` and check for the diff o/p first)
|
||||
- `diff <(df -ha | grep pods | awk '{print $NF}') <(df -h | grep pods | awk '{print $NF}') | awk '{print $2}' | xargs umount -l`
|
||||
- As per the conversation on [rancher/k3d#594](https://github.com/rancher/k3d/issues/594#issuecomment-837900646) above issue wasn't reported/known earlier and so there are high chances that it's not universal.
|
||||
|
||||
## [SOLVED] Nodes fail to start or get stuck in `NotReady` state with log `nf_conntrack_max: permission denied`
|
||||
|
||||
### Problem
|
||||
|
||||
- When: This happens when running k3d on a Linux system with a kernel version >= 5.12.2 (and others like >= 5.11.19) when creating a new cluster
|
||||
- the node(s) stop or get stuck with a log line like this: `<TIMESTAMP> F0516 05:05:31.782902 7 server.go:495] open /proc/sys/net/netfilter/nf_conntrack_max: permission denied`
|
||||
- Why: The issue was introduced by a change in the Linux kernel ([Changelog 5.12.2](https://cdn.kernel.org/pub/linux/kernel/v5.x/ChangeLog-5.12.2): [Commit](https://github.com/torvalds/linux/commit/671c54ea8c7ff47bd88444f3fffb65bf9799ce43)), that changed the netfilter_conntrack behavior in a way that `kube-proxy` is not able to set the `nf_conntrack_max` value anymore
|
||||
|
||||
### Workaround
|
||||
|
||||
- Workaround: as a workaround, we can tell `kube-proxy` to not even try to set this value:
|
||||
|
||||
```bash
|
||||
k3d cluster create \
|
||||
--k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@server:*" \
|
||||
--k3s-arg "--kube-proxy-arg=conntrack-max-per-core=0@agent:*" \
|
||||
--image rancher/k3s:v1.20.6-k3s
|
||||
```
|
||||
|
||||
### Fix
|
||||
|
||||
- **Note**: k3d v4.4.5 already uses rancher/k3s:v1.21.1-k3s1 as the new default k3s image, so no workarounds needed there!
|
||||
|
||||
This is going to be fixed "upstream" in k3s itself in [rancher/k3s#3337](https://github.com/k3s-io/k3s/pull/3337) and backported to k3s versions as low as v1.18.
|
||||
|
||||
- **The fix was released and backported in k3s, so you don't need to use the workaround when using one of the following k3s versions (or later ones)**
|
||||
- v1.18.19-k3s1 ([rancher/k3s#3344](https://github.com/k3s-io/k3s/pull/3344))
|
||||
- v1.19.11-k3s1 ([rancher/k3s#3343](https://github.com/k3s-io/k3s/pull/3343))
|
||||
- v1.20.7-k3s1 ([rancher/k3s#3342](https://github.com/k3s-io/k3s/pull/3342))
|
||||
  - v1.21.1-k3s1 ([rancher/k3s#3341](https://github.com/k3s-io/k3s/pull/3341))
|
||||
- Issue Reference: [rancher/k3d#607](https://github.com/rancher/k3d/issues/607)
|
||||
|
||||
## DockerHub Pull Rate Limit
|
||||
|
||||
### Problem
|
||||
|
||||
You're deploying something to the cluster using an image from DockerHub and the image fails to be pulled, with a `429` response code and a message saying `You have reached your pull rate limit. You may increase the limit by authenticating and upgrading`.
|
||||
|
||||
### Cause
|
||||
|
||||
This is caused by DockerHub's pull rate limit (see <https://docs.docker.com/docker-hub/download-rate-limit/>), which limits pulls from unauthenticated/anonymous users to 100 pulls per hour and for authenticated users (not paying customers) to 200 pulls per hour (as of the time of writing).
|
||||
|
||||
### Solution
|
||||
|
||||
a) use images from a private registry, e.g. configured as a pull-through cache for DockerHub
|
||||
b) use a different public registry without such limitations, if the same image is stored there
|
||||
c) authenticate containerd inside k3s/k3d to use your DockerHub user
|
||||
|
||||
#### (c) Authenticate Containerd against DockerHub
|
||||
|
||||
1. Create a registry configuration file for containerd:
|
||||
|
||||
```yaml
|
||||
# saved as e.g. $HOME/registries.yaml
|
||||
configs:
|
||||
"docker.io":
|
||||
auth:
|
||||
username: "$USERNAME"
|
||||
password: "$PASSWORD"
|
||||
```
|
||||
|
||||
2. Create a k3d cluster using that config:
|
||||
|
||||
```bash
|
||||
k3d cluster create --registry-config $HOME/registries.yaml
|
||||
```
|
||||
|
||||
3. Profit. That's it. In the test for this, we pulled the same image 120 times in a row (confirmed, that pull numbers went up), without being rate limited (as a non-paying, normal user)
|
116
docs/index.md
116
docs/index.md
@ -1,116 +0,0 @@
|
||||
# Overview
|
||||
|
||||

|
||||
|
||||
## What is k3d?
|
||||
|
||||
k3d is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s) (Rancher Lab's minimal Kubernetes distribution) in docker.
|
||||
|
||||
k3d makes it very easy to create single- and multi-node [k3s](https://github.com/rancher/k3s) clusters in docker, e.g. for local development on Kubernetes.
|
||||
|
||||
**Note:** k3d is a **community-driven project**, that is supported by Rancher (SUSE) and it's not an official Rancher (SUSE) project.
|
||||
|
||||
??? Tip "View a quick demo"
|
||||
<asciinema-player src="/static/asciicast/20210917_k3d_v5.0.0_01.cast" cols=200 rows=32></asciinema-player>
|
||||
|
||||
## Learning
|
||||
|
||||
!!! Tip "k3d demo repository: [iwilltry42/k3d-demo](https://github.com/iwilltry42/k3d-demo)"
|
||||
Featured use-cases include:
|
||||
|
||||
- **hot-reloading** of code when developing on k3d (Python Flask App)
|
||||
- build-deploy-test cycle using **Tilt**
|
||||
- full cluster lifecycle for simple and **multi-server** clusters
|
||||
- Proof of Concept of using k3d as a service in **Drone CI**
|
||||
|
||||
- [Rancher Meetup - May 2020 - Simplifying Your Cloud-Native Development Workflow With K3s, K3c and K3d (YouTube)](https://www.youtube.com/watch?v=hMr3prm9gDM)
|
||||
|
||||
## Requirements
|
||||
|
||||
- [**docker**](https://docs.docker.com/install/) to be able to use k3d at all
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the Kubernetes cluster
|
||||
|
||||
## Releases
|
||||
|
||||
| Platform | Stage | Version | Release Date | |
|
||||
|-----------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|---|
|
||||
| [**GitHub Releases**](https://github.com/rancher/k3d/releases) | stable | [](https://github.com/rancher/k3d/releases/latest) | [](https://github.com/rancher/k3d/releases/latest) | |
|
||||
| [**GitHub Releases**](https://github.com/rancher/k3d/releases) | latest | [](https://github.com/rancher/k3d/releases) | [](https://github.com/rancher/k3d/releases) | |
|
||||
| [**Homebrew**](https://formulae.brew.sh/formula/k3d) | - | [](https://formulae.brew.sh/formula/k3d) | - | |
|
||||
| [**Chocolatey**](https://chocolatey.org/packages/k3d/)| stable | [](https://chocolatey.org/packages/k3d/) | - | |
|
||||
|
||||
## Installation
|
||||
|
||||
You have several options there:
|
||||
|
||||
### [:fontawesome-regular-file-code: Install Script](https://raw.githubusercontent.com/rancher/k3d/main/install.sh)
|
||||
|
||||
#### Install current latest release
|
||||
|
||||
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
|
||||
|
||||
#### Install specific release
|
||||
|
||||
Use the install script to grab a specific release (via `TAG` environment variable):
|
||||
|
||||
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
|
||||
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v5.0.0 bash`
|
||||
|
||||
### Other Installers
|
||||
|
||||
??? Tip "Other Installation Methods"
|
||||
|
||||
- [:fontawesome-solid-beer: Homebrew (MacOS/Linux)](https://brew.sh): `#!bash brew install k3d`
|
||||
|
||||
*Note*: The formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
|
||||
|
||||
- [:material-arch: AUR (Arch Linux User Repository)](https://aur.archlinux.org/): `#!bash yay -S rancher-k3d-bin`
|
||||
|
||||
Package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/)
|
||||
|
||||
- [:material-github: Download GitHub Release](https://github.com/rancher/k3d/releases)
|
||||
|
||||
Grab a release binary from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself
|
||||
|
||||
- [:material-microsoft-windows: Chocolatey (Windows)](https://chocolatey.org/): `choco install k3d`
|
||||
|
||||
*Note*: package source can be found in [erwinkersten/chocolatey-packages](https://github.com/erwinkersten/chocolatey-packages/tree/master/automatic/k3d)
|
||||
|
||||
- [arkade](https://github.com/alexellis/arkade): `arkade get k3d`
|
||||
|
||||
- [asdf](https://asdf-vm.com): `asdf plugin-add k3d && asdf install k3d latest`
|
||||
|
||||
*Note*: `asdf plugin-add k3d`, then `asdf install k3d <tag>` with `<tag> = latest` or `5.x.x` for a specific version (maintained by [spencergilbert/asdf-k3d](https://github.com/spencergilbert/asdf-k3d))
|
||||
|
||||
- Others
|
||||
- install via go: `#!bash go install github.com/rancher/k3d@latest` (**Note**: this will give you unreleased/bleeding-edge changes)
|
||||
|
||||
## Quick Start
|
||||
|
||||
Create a cluster named `mycluster` with just a single server node:
|
||||
|
||||
```bash
|
||||
k3d cluster create mycluster
|
||||
```
|
||||
|
||||
Use the new cluster with [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), e.g.:
|
||||
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
??? Note "Getting the cluster's kubeconfig (included in `k3d cluster create`)"
|
||||
Get the new cluster's connection details merged into your default kubeconfig (usually specified using the `KUBECONFIG` environment variable or the default path `#!bash $HOME/.kube/config`) and directly switch to the new context:
|
||||
|
||||
```bash
|
||||
k3d kubeconfig merge mycluster --kubeconfig-switch-context
|
||||
```
|
||||
|
||||
## Related Projects
|
||||
|
||||
- [vscode-k3d](https://github.com/inercia/vscode-k3d/): VSCode Extension to handle k3d clusters from within VSCode
|
||||
- [k3x](https://github.com/inercia/k3x): a graphics interface (for Linux) to k3d.
|
||||
- [AbsaOSS/k3d-action](https://github.com/AbsaOSS/k3d-action): fully customizable GitHub Action to run lightweight Kubernetes clusters.
|
||||
- [AutoK3s](https://github.com/cnrancher/autok3s): a lightweight tool to help run K3s everywhere including k3d provider.
|
||||
- [nolar/setup-k3d-k3s](https://github.com/nolar/setup-k3d-k3s): setup K3d/K3s for GitHub Actions.
|
244
docs/registries.md
Normal file
244
docs/registries.md
Normal file
@ -0,0 +1,244 @@
|
||||
# Using registries with k3d
|
||||
|
||||
## <a name="registries-file"></a>Registries configuration file
|
||||
|
||||
You can add registries by specifying them in a `registries.yaml` in your `$HOME/.k3d` directory.
|
||||
This file will be loaded automatically by k3d if present and will be shared between all your
|
||||
k3d clusters, but you can also use a specific file for a new cluster with the
|
||||
`--registries-file` argument.
|
||||
|
||||
This file is a regular [k3s registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/airgap/#create-registry-yaml),
|
||||
and looks like this:
|
||||
|
||||
```yaml
|
||||
mirrors:
|
||||
"my.company.registry:5000":
|
||||
endpoint:
|
||||
- http://my.company.registry:5000
|
||||
```
|
||||
|
||||
In this example, an image with a name like `my.company.registry:5000/nginx:latest` would be
|
||||
_pulled_ from the registry running at `http://my.company.registry:5000`.
|
||||
|
||||
Note well there is an important limitation: **this configuration file will only work with
|
||||
k3s >= v0.10.0**. It will fail silently with previous versions of k3s, but you will find in the
|
||||
[section below](#k3s-old) an alternative solution.
|
||||
|
||||
This file can also be used for providing additional information necessary for accessing
|
||||
some registries, like [authentication](#auth) and [certificates](#certs).
|
||||
|
||||
### <a name="auth"></a>Authenticated registries
|
||||
|
||||
When using authenticated registries, we can add the _username_ and _password_ in a
|
||||
`configs` section in the `registries.yaml`, like this:
|
||||
|
||||
```yaml
|
||||
mirrors:
|
||||
my.company.registry:
|
||||
endpoint:
|
||||
- http://my.company.registry
|
||||
|
||||
configs:
|
||||
my.company.registry:
|
||||
auth:
|
||||
username: aladin
|
||||
password: abracadabra
|
||||
```
|
||||
|
||||
### <a name="certs"></a>Secure registries
|
||||
|
||||
When using secure registries, the [`registries.yaml` file](#registries-file) must include information
|
||||
about the certificates. For example, if you want to use images from the secure registry
|
||||
running at `https://my.company.registry`, you must first download a CA file valid for that server
|
||||
and store it in some well-known directory like `${HOME}/.k3d/my-company-root.pem`.
|
||||
|
||||
Then you have to mount the CA file in some directory in the nodes in the cluster and
|
||||
include that mounted file in a `configs` section in the [`registries.yaml` file](#registries-file).
|
||||
For example, if we mount the CA file in `/etc/ssl/certs/my-company-root.pem`, the `registries.yaml`
|
||||
will look like:
|
||||
|
||||
```yaml
|
||||
mirrors:
|
||||
my.company.registry:
|
||||
endpoint:
|
||||
- https://my.company.registry
|
||||
|
||||
configs:
|
||||
my.company.registry:
|
||||
tls:
|
||||
# we will mount "my-company-root.pem" in the /etc/ssl/certs/ directory.
|
||||
ca_file: "/etc/ssl/certs/my-company-root.pem"
|
||||
```
|
||||
|
||||
Finally, we can create the cluster, mounting the CA file in the path we
|
||||
specified in `ca_file`:
|
||||
|
||||
```shell script
|
||||
k3d create --volume ${HOME}/.k3d/my-company-root.pem:/etc/ssl/certs/my-company-root.pem ...
|
||||
```
|
||||
|
||||
## Using a local registry
|
||||
|
||||
### Using the k3d registry
|
||||
|
||||
k3d can manage a local registry that you can use for pushing your images to, and your k3d nodes
|
||||
will be able to use those images automatically. k3d will create the registry for you and connect
|
||||
it to your k3d cluster. It is important to note that this registry will be shared between all
|
||||
your k3d clusters, and it will be released when the last of the k3d clusters that was using it
|
||||
is deleted.
|
||||
|
||||
In order to enable the k3d registry when creating a new cluster, you must run k3d with the
|
||||
`--enable-registry` argument
|
||||
|
||||
```shell script
|
||||
k3d create --enable-registry ...
|
||||
```
|
||||
|
||||
Then you must make it accessible as described in [the next section](#etc-hosts). And
|
||||
then you should [check your local registry](#testing).
|
||||
|
||||
### Using your own local registry
|
||||
|
||||
If you don't want k3d to manage your registry, you can start it with some `docker` commands, like:
|
||||
|
||||
```shell script
|
||||
docker volume create local_registry
|
||||
docker container run -d --name registry.localhost -v local_registry:/var/lib/registry --restart always -p 5000:5000 registry:2
|
||||
```
|
||||
|
||||
These commands will start your registry in `registry.localhost:5000`. In order to push to this registry, you will
|
||||
need to make it accessible as described in [the next section](#etc-hosts). Once your
|
||||
registry is up and running, we will need to add it to your [`registries.yaml` configuration file](#registries-file).
|
||||
Finally, you must connect the registry network to the k3d cluster network:
|
||||
`docker network connect k3d-k3s-default registry.localhost`. And then you can
|
||||
[check your local registry](#testing).
|
||||
|
||||
### <a name="etc-hosts"></a>Pushing to your local registry address
|
||||
|
||||
The registry will be located, by default, at `registry.localhost:5000` (customizable with the `--registry-name`
|
||||
and `--registry-port` parameters). All the nodes in your k3d cluster can resolve this hostname (thanks to the
|
||||
DNS server provided by the Docker daemon) but, in order to be able to push to this registry, this hostname
|
||||
must also be resolved from your host.
|
||||
|
||||
Luckily (for Linux users), [NSS-myhostname](http://man7.org/linux/man-pages/man8/nss-myhostname.8.html) ships with many Linux distributions
|
||||
and should resolve `*.localhost` automatically to `127.0.0.1`.
|
||||
Otherwise, it's installable using `sudo apt install libnss-myhostname`.
|
||||
|
||||
If it's not the case, you can add an entry in your `/etc/hosts` file like this:
|
||||
|
||||
```shell script
|
||||
127.0.0.1 registry.localhost
|
||||
```
|
||||
|
||||
Once again, this will only work with k3s >= v0.10.0 (see the [section below](#k3s-old)
|
||||
when using k3s <= v0.9.1)
|
||||
|
||||
### <a name="registry-volume"></a>Local registry volume
|
||||
|
||||
The local k3d registry uses a volume for storing the images. This volume will be destroyed
|
||||
when the k3d registry is released. In order to persist this volume and make these images survive
|
||||
the removal of the registry, you can specify a volume with the `--registry-volume` and use the
|
||||
`--keep-registry-volume` flag when deleting the cluster. This will create a volume with the given
|
||||
name the first time the registry is used, while successive invocations will just mount this
|
||||
existing volume in the k3d registry container.
|
||||
|
||||
### <a name="docker-hub-cache"></a>Docker Hub Cache
|
||||
|
||||
The local k3d registry can also be used for caching images from the Docker Hub. You can start the
|
||||
registry as a pull-through cache when the cluster is created with `--enable-registry-cache`. Used
|
||||
in conjunction with `--registry-volume`/`--keep-registry-volume`, this can speed up all the downloads
|
||||
from the Hub by keeping a persistent cache of images in your local machine.
|
||||
|
||||
**Note**: This disables the registry for pushing local images to it! ([Comment](https://github.com/rancher/k3d/pull/207#issuecomment-617318637))
|
||||
|
||||
## <a name="testing"></a>Testing your registry
|
||||
|
||||
You should test that you can
|
||||
|
||||
* push to your registry from your local development machine.
|
||||
* use images from that registry in `Deployments` in your k3d cluster.
|
||||
|
||||
We will verify these two things for a local registry (located at `registry.localhost:5000`) running
|
||||
in your development machine. Things would be basically the same for checking an external
|
||||
registry, but some additional configuration could be necessary in your local machine when
|
||||
using an authenticated or secure registry (please refer to Docker's documentation for this).
|
||||
|
||||
Firstly, we can download some image (like `nginx`) and push it to our local registry with:
|
||||
|
||||
```shell script
|
||||
docker pull nginx:latest
|
||||
docker tag nginx:latest registry.localhost:5000/nginx:latest
|
||||
docker push registry.localhost:5000/nginx:latest
|
||||
```
|
||||
|
||||
Then we can deploy a pod referencing this image to your cluster:
|
||||
|
||||
```shell script
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-test-registry
|
||||
labels:
|
||||
app: nginx-test-registry
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx-test-registry
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx-test-registry
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx-test-registry
|
||||
image: registry.localhost:5000/nginx:latest
|
||||
ports:
|
||||
- containerPort: 80
|
||||
EOF
|
||||
```
|
||||
|
||||
Then you should check that the pod is running with `kubectl get pods -l "app=nginx-test-registry"`.
|
||||
|
||||
## <a name="k3s-old"></a>Configuring registries for k3s <= v0.9.1
|
||||
|
||||
k3s servers below v0.9.1 do not recognize the `registries.yaml` file as we described in
|
||||
the [previous section](#registries-file), so you will need to embed the contents of that
|
||||
file in a `containerd` configuration file. You will have to create your own `containerd`
|
||||
configuration file at some well-known path like `${HOME}/.k3d/config.toml.tmpl`, like this:
|
||||
|
||||
<pre>
|
||||
# Original section: no changes
|
||||
[plugins.opt]
|
||||
path = "{{ .NodeConfig.Containerd.Opt }}"
|
||||
[plugins.cri]
|
||||
stream_server_address = "{{ .NodeConfig.AgentConfig.NodeName }}"
|
||||
stream_server_port = "10010"
|
||||
{{- if .IsRunningInUserNS }}
|
||||
disable_cgroup = true
|
||||
disable_apparmor = true
|
||||
restrict_oom_score_adj = true
|
||||
{{ end -}}
|
||||
{{- if .NodeConfig.AgentConfig.PauseImage }}
|
||||
sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"
|
||||
{{ end -}}
|
||||
{{- if not .NodeConfig.NoFlannel }}
|
||||
[plugins.cri.cni]
|
||||
bin_dir = "{{ .NodeConfig.AgentConfig.CNIBinDir }}"
|
||||
conf_dir = "{{ .NodeConfig.AgentConfig.CNIConfDir }}"
|
||||
{{ end -}}
|
||||
|
||||
# Added section: additional registries and the endpoints
|
||||
[plugins.cri.registry.mirrors]
|
||||
[plugins.cri.registry.mirrors."<b>registry.localhost:5000</b>"]
|
||||
endpoint = ["http://<b>registry.localhost:5000</b>"]
|
||||
</pre>
|
||||
|
||||
and then mount it at `/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl` (where
|
||||
the `containerd` in your k3d nodes will load it) when creating the k3d cluster:
|
||||
|
||||
```bash
|
||||
k3d create \
|
||||
--volume ${HOME}/.k3d/config.toml.tmpl:/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
|
||||
```
|
@ -1,8 +0,0 @@
|
||||
mkdocs==1.2.2
|
||||
mkdocs-material==7.2.6
|
||||
pymdown-extensions==8.2
|
||||
mkdocs-git-revision-date-localized-plugin==0.9.3
|
||||
mkdocs-awesome-pages-plugin==2.5.0
|
||||
mdx_truly_sane_lists==1.2 # https://github.com/radude/mdx_truly_sane_lists
|
||||
mkdocs-include-markdown-plugin==3.2.2 # https://github.com/mondeja/mkdocs-include-markdown-plugin
|
||||
mike==1.1.0 # versioned docs: https://github.com/jimporter/mike
|
156
docs/static/asciicast/20200515_k3d.01.cast
vendored
156
docs/static/asciicast/20200515_k3d.01.cast
vendored
@ -1,156 +0,0 @@
|
||||
{"version": 2, "width": 200, "height": 32, "timestamp": 1589533999, "env": {"SHELL": "/bin/zsh", "TERM": "xterm-256color"}}
|
||||
[0.006629, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[0.007092, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[34mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[1.440839, "o", "k"]
|
||||
[1.528862, "o", "\bk3"]
|
||||
[1.693385, "o", "d"]
|
||||
[1.84465, "o", " "]
|
||||
[1.924598, "o", "v"]
|
||||
[2.022618, "o", "e"]
|
||||
[2.173655, "o", "r"]
|
||||
[2.342623, "o", "s"]
|
||||
[2.353239, "o", "i"]
|
||||
[2.56165, "o", "o"]
|
||||
[2.683536, "o", "n"]
|
||||
[2.870895, "o", "\u001b[?2004l\r\r\n"]
|
||||
[2.896406, "o", "k3d version v3.0.0-beta.1\r\nk3s version v1.17.5-k3s1 (default)\r\n"]
|
||||
[2.898123, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[2.898459, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[34mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[4.828641, "o", "k"]
|
||||
[4.992663, "o", "\bk3"]
|
||||
[5.256788, "o", "d"]
|
||||
[5.872924, "o", " "]
|
||||
[5.961906, "o", "c"]
|
||||
[6.180756, "o", "r"]
|
||||
[6.266619, "o", "e"]
|
||||
[6.290691, "o", "a"]
|
||||
[6.378818, "o", "t"]
|
||||
[6.464656, "o", "e"]
|
||||
[6.631722, "o", " "]
|
||||
[6.794721, "o", "c"]
|
||||
[6.873717, "o", "l"]
|
||||
[7.082768, "o", "u"]
|
||||
[7.159752, "o", "s"]
|
||||
[7.269729, "o", "t"]
|
||||
[7.388766, "o", "e"]
|
||||
[7.456672, "o", "r"]
|
||||
[7.621959, "o", " "]
|
||||
[7.742623, "o", "d"]
|
||||
[7.819679, "o", "e"]
|
||||
[7.940748, "o", "m"]
|
||||
[8.083705, "o", "o"]
|
||||
[8.281751, "o", " "]
|
||||
[8.765723, "o", "-"]
|
||||
[8.91971, "o", "-"]
|
||||
[9.095513, "o", "m"]
|
||||
[9.172699, "o", "a"]
|
||||
[9.249893, "o", "s"]
|
||||
[9.403748, "o", "t"]
|
||||
[9.500553, "o", "e"]
|
||||
[9.566754, "o", "r"]
|
||||
[9.744773, "o", "s"]
|
||||
[9.931786, "o", " "]
|
||||
[10.118618, "o", "3"]
|
||||
[10.282676, "o", " "]
|
||||
[10.65772, "o", "-"]
|
||||
[10.811745, "o", "-"]
|
||||
[11.017705, "o", "w"]
|
||||
[11.215672, "o", "o"]
|
||||
[11.306527, "o", "r"]
|
||||
[11.438747, "o", "k"]
|
||||
[11.493668, "o", "e"]
|
||||
[11.570851, "o", "r"]
|
||||
[11.779695, "o", "s"]
|
||||
[11.977563, "o", " "]
|
||||
[12.10969, "o", "3"]
|
||||
[12.571635, "o", "\u001b[?2004l\r\r\n"]
|
||||
[12.758448, "o", "\u001b[36mINFO\u001b[0m[0000] Created network 'k3d-demo' \r\n"]
|
||||
[12.762761, "o", "\u001b[36mINFO\u001b[0m[0000] Created volume 'k3d-demo-images' \r\n"]
|
||||
[12.762805, "o", "\u001b[36mINFO\u001b[0m[0000] Creating initializing master node \r\n\u001b[36mINFO\u001b[0m[0000] Creating node 'k3d-demo-master-0' \r\n"]
|
||||
[23.291657, "o", "\u001b[36mINFO\u001b[0m[0010] Creating node 'k3d-demo-master-1' \r\n"]
|
||||
[24.710807, "o", "\u001b[36mINFO\u001b[0m[0012] Creating node 'k3d-demo-master-2' \r\n"]
|
||||
[25.082483, "o", "\u001b[36mINFO\u001b[0m[0012] Creating node 'k3d-demo-worker-0' \r\n"]
|
||||
[25.583579, "o", "\u001b[36mINFO\u001b[0m[0013] Creating node 'k3d-demo-worker-1' \r\n"]
|
||||
[25.995178, "o", "\u001b[36mINFO\u001b[0m[0013] Creating node 'k3d-demo-worker-2' \r\n"]
|
||||
[26.407777, "o", "\u001b[36mINFO\u001b[0m[0013] Creating LoadBalancer 'k3d-demo-masterlb' \r\n"]
|
||||
[26.864898, "o", "\u001b[36mINFO\u001b[0m[0014] Cluster 'demo' created successfully! \r\n\u001b[36mINFO\u001b[0m[0014] You can now use it like this: \r\nexport KUBECONFIG=$(k3d get kubeconfig demo)\r\nkubectl cluster-info\r\n"]
|
||||
[26.865832, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[26.865936, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[34mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[28.485689, "o", "k"]
|
||||
[28.574492, "o", "\bk3"]
|
||||
[28.837499, "o", "d"]
|
||||
[29.035592, "o", " "]
|
||||
[29.145724, "o", "g"]
|
||||
[29.244609, "o", "e"]
|
||||
[29.376873, "o", "t"]
|
||||
[29.530563, "o", " "]
|
||||
[29.696594, "o", "c"]
|
||||
[29.826811, "o", "l"]
|
||||
[30.047654, "o", "u"]
|
||||
[30.190485, "o", "s"]
|
||||
[30.344734, "o", "t"]
|
||||
[30.499719, "o", "e"]
|
||||
[30.576583, "o", "r"]
|
||||
[30.730679, "o", "s"]
|
||||
[30.917549, "o", "\u001b[?2004l\r\r\n"]
|
||||
[30.950674, "o", "NAME MASTERS WORKERS\r\ndemo 3 3\r\n"]
|
||||
[30.951974, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[30.952061, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[34mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[32.588716, "o", "k"]
|
||||
[32.688532, "o", "\bk3"]
|
||||
[32.875419, "o", "d"]
|
||||
[33.028534, "o", " "]
|
||||
[33.124585, "o", "g"]
|
||||
[33.215679, "o", "e"]
|
||||
[33.326491, "o", "t"]
|
||||
[33.402563, "o", " "]
|
||||
[33.545576, "o", "k"]
|
||||
[33.733553, "o", "u"]
|
||||
[33.876741, "o", "b"]
|
||||
[33.953568, "o", "e"]
|
||||
[34.048717, "o", "c"]
|
||||
[34.162605, "o", "o"]
|
||||
[34.282878, "o", "n"]
|
||||
[34.381804, "o", "f"]
|
||||
[34.514685, "o", "i"]
|
||||
[34.59177, "o", "g"]
|
||||
[34.799961, "o", " "]
|
||||
[34.932516, "o", "d"]
|
||||
[35.02066, "o", "e"]
|
||||
[35.174697, "o", "m"]
|
||||
[35.306576, "o", "o"]
|
||||
[35.515706, "o", " "]
|
||||
[35.965907, "o", "-"]
|
||||
[36.109917, "o", "-"]
|
||||
[36.227722, "o", "s"]
|
||||
[36.416702, "o", "w"]
|
||||
[36.504783, "o", "i"]
|
||||
[36.669967, "o", "t"]
|
||||
[36.911819, "o", "c"]
|
||||
[37.032831, "o", "h"]
|
||||
[37.274843, "o", "\u001b[?2004l\r\r\n"]
|
||||
[37.371773, "o", "/home/ThisCouldBeYou/.kube/merged_config.yaml\r\n"]
|
||||
[37.372336, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[37.372408, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[34mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[42.510815, "o", "k"]
|
||||
[42.82706, "o", "\bku"]
|
||||
[42.972866, "o", "b"]
|
||||
[43.070857, "o", "e"]
|
||||
[43.17076, "o", "c"]
|
||||
[43.368711, "o", "t"]
|
||||
[43.467697, "o", "l"]
|
||||
[43.643995, "o", " "]
|
||||
[43.983766, "o", "g"]
|
||||
[44.094802, "o", "e"]
|
||||
[44.237659, "o", "t"]
|
||||
[44.391841, "o", " "]
|
||||
[44.578931, "o", "n"]
|
||||
[44.674603, "o", "o"]
|
||||
[44.754837, "o", "d"]
|
||||
[44.83183, "o", "e"]
|
||||
[45.029756, "o", "s"]
|
||||
[45.194725, "o", "\u001b[?2004l\r\r\n"]
|
||||
[45.286817, "o", "NAME STATUS ROLES AGE VERSION\r\nk3d-demo-master-2 Ready master 14s v1.17.5+k3s1\r\nk3d-demo-worker-1"]
|
||||
[45.287003, "o", " Ready <none> 18s v1.17.5+k3s1\r\nk3d-demo-master-0 Ready master 23s v1.17.5+k3s1\r\nk3d-demo-worker-2 Ready <none> 17s v1.17.5+k3s1\r\nk3d-demo-worker-0 Ready <none> 18s v1.17.5+k3s1\r\nk3d-demo-master-1 Ready master 15s v1.17.5+k3s1\r\n"]
|
||||
[45.288129, "o", "\u001b[K\u001b[?2004h"]
|
||||
[47.690299, "o", "\u001b[?2004l\r\r\n"]
|
215
docs/static/asciicast/20200715_k3d.01.cast
vendored
215
docs/static/asciicast/20200715_k3d.01.cast
vendored
@ -1,215 +0,0 @@
|
||||
{"version":2,"width":213,"height":47,"timestamp":1594792376,"theme":{},"env":{"SHELL":"/bin/zsh","TERM":"xterm-256color"}}
|
||||
[2.221,"o","\u001b[H\u001b[2J\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K"]
|
||||
[2.881,"o","k"]
|
||||
[3.002,"o","\u0008k3"]
|
||||
[3.187,"o","d"]
|
||||
[3.32,"o"," "]
|
||||
[3.42,"o","v"]
|
||||
[3.53,"o","e"]
|
||||
[3.662,"o","r"]
|
||||
[3.827,"o","s"]
|
||||
[3.913,"o","i"]
|
||||
[4.113,"o","o"]
|
||||
[4.245,"o","n"]
|
||||
[4.443,"o","\u001b[?2004l\r\r\n"]
|
||||
[4.471,"o","k3d version v3.0.0\r\nk3s version v1.18.6-k3s1 (default)\r\n"]
|
||||
[4.473,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[5.719,"o","k"]
|
||||
[5.829,"o","\u0008k3"]
|
||||
[6.038,"o","d"]
|
||||
[6.235,"o"," "]
|
||||
[6.75,"o","c"]
|
||||
[6.852,"o","l"]
|
||||
[7.094,"o","u"]
|
||||
[7.193,"o","s"]
|
||||
[7.534,"o","t"]
|
||||
[7.71,"o","e"]
|
||||
[7.831,"o","r"]
|
||||
[8.029,"o"," "]
|
||||
[8.268,"o","l"]
|
||||
[8.491,"o","i"]
|
||||
[8.634,"o","s"]
|
||||
[8.796,"o","t"]
|
||||
[8.943,"o","\u001b[?2004l\r\r\n"]
|
||||
[8.971,"o","NAME SERVERS AGENTS\r\n"]
|
||||
[8.972,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[9.756,"o","k"]
|
||||
[9.854,"o","\u0008k3"]
|
||||
[10.05,"o","d"]
|
||||
[10.448,"o"," "]
|
||||
[10.811,"o","c"]
|
||||
[10.91,"o","l"]
|
||||
[11.119,"o","u"]
|
||||
[11.174,"o","s"]
|
||||
[11.284,"o","t"]
|
||||
[11.416,"o","e"]
|
||||
[11.515,"o","r"]
|
||||
[11.648,"o"," "]
|
||||
[11.746,"o","c"]
|
||||
[11.988,"o","r"]
|
||||
[12.054,"o","e"]
|
||||
[12.087,"o","a"]
|
||||
[12.208,"o","t"]
|
||||
[12.285,"o","e"]
|
||||
[13.055,"o"," "]
|
||||
[13.77,"o","d"]
|
||||
[13.858,"o","e"]
|
||||
[13.99,"o","m"]
|
||||
[14.133,"o","o"]
|
||||
[14.321,"o"," "]
|
||||
[14.629,"o","-"]
|
||||
[14.801,"o","-"]
|
||||
[14.867,"o","s"]
|
||||
[15.065,"o","e"]
|
||||
[15.111,"o","r"]
|
||||
[15.343,"o","v"]
|
||||
[15.409,"o","e"]
|
||||
[15.519,"o","r"]
|
||||
[15.706,"o","s"]
|
||||
[15.905,"o"," "]
|
||||
[16.74,"o","3"]
|
||||
[16.914,"o"," "]
|
||||
[17.137,"o","-"]
|
||||
[17.291,"o","-"]
|
||||
[17.44,"o","a"]
|
||||
[17.586,"o","g"]
|
||||
[17.687,"o","e"]
|
||||
[17.763,"o","n"]
|
||||
[17.84,"o","t"]
|
||||
[18.017,"o","s"]
|
||||
[18.259,"o"," "]
|
||||
[18.379,"o","3"]
|
||||
[19.137,"o","\u001b[?2004l\r\r\n"]
|
||||
[19.222,"o","\u001b[36mINFO\u001b[0m[0000] Created network 'k3d-demo' \r\n"]
|
||||
[19.225,"o","\u001b[36mINFO\u001b[0m[0000] Created volume 'k3d-demo-images' \r\n"]
|
||||
[19.225,"o","\u001b[36mINFO\u001b[0m[0000] Creating initializing server node \r\n\u001b[36mINFO\u001b[0m[0000] Creating node 'k3d-demo-server-0' \r\n"]
|
||||
[30.266,"o","\u001b[36mINFO\u001b[0m[0011] Creating node 'k3d-demo-server-1' \r\n"]
|
||||
[31.634,"o","\u001b[36mINFO\u001b[0m[0012] Creating node 'k3d-demo-server-2' \r\n"]
|
||||
[32.125,"o","\u001b[36mINFO\u001b[0m[0012] Creating node 'k3d-demo-agent-0' \r\n"]
|
||||
[32.519,"o","\u001b[36mINFO\u001b[0m[0013] Creating node 'k3d-demo-agent-1' \r\n"]
|
||||
[33.096,"o","\u001b[36mINFO\u001b[0m[0013] Creating node 'k3d-demo-agent-2' \r\n"]
|
||||
[33.577,"o","\u001b[36mINFO\u001b[0m[0014] Creating LoadBalancer 'k3d-demo-serverlb' \r\n"]
|
||||
[38.248,"o","\u001b[36mINFO\u001b[0m[0019] Cluster 'demo' created successfully! \r\n"]
|
||||
[38.33,"o","\u001b[36mINFO\u001b[0m[0019] You can now use it like this: \r\nkubectl cluster-info\r\n"]
|
||||
[38.335,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[38.336,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[40.212,"o","k"]
|
||||
[40.465,"o","\u0008ku"]
|
||||
[40.63,"o","b"]
|
||||
[40.773,"o","e"]
|
||||
[40.916,"o","c"]
|
||||
[41.169,"o","t"]
|
||||
[41.302,"o","l"]
|
||||
[41.53,"o"," "]
|
||||
[41.686,"o","c"]
|
||||
[41.774,"o","o"]
|
||||
[41.928,"o","n"]
|
||||
[42.093,"o","f"]
|
||||
[42.213,"o","i"]
|
||||
[42.401,"o","g"]
|
||||
[42.565,"o"," "]
|
||||
[42.686,"o","v"]
|
||||
[42.807,"o","i"]
|
||||
[42.895,"o","e"]
|
||||
[43.082,"o","w"]
|
||||
[43.27,"o","\u001b[?2004l\r\r\n"]
|
||||
[43.304,"o","apiVersion: v1\r\nclusters:\r\n- cluster:\r\n certificate-authority-data: DATA+OMITTED\r\n server: https://0.0.0.0:38365\r\n name: k3d-demo\r\ncontexts:\r\n- context:\r\n cluster: k3d-demo\r\n user: admin@k3d-demo\r\n name: k3d-demo\r\ncurrent-context: k3d-demo\r\nkind: Config\r\npreferences: {}\r\nusers:\r\n- name: admin@k3d-demo\r\n user:\r\n password: 1c22f8175521452403719784fa0b124f\r\n username: admin\r\n"]
|
||||
[43.305,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[43.305,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[45.754,"o","k"]
|
||||
[45.843,"o","\u0008k3"]
|
||||
[45.987,"o","d"]
|
||||
[46.075,"o"," "]
|
||||
[46.24,"o","c"]
|
||||
[46.36,"o","l"]
|
||||
[46.569,"o","u"]
|
||||
[46.658,"o","s"]
|
||||
[46.768,"o","t"]
|
||||
[46.91,"o","e"]
|
||||
[46.977,"o","r"]
|
||||
[47.042,"o"," "]
|
||||
[47.252,"o","l"]
|
||||
[47.472,"o","i"]
|
||||
[47.56,"o","s"]
|
||||
[47.681,"o","t"]
|
||||
[47.846,"o","\u001b[?2004l\r\r\n"]
|
||||
[47.88,"o","NAME SERVERS AGENTS\r\ndemo 3 3\r\n"]
|
||||
[47.881,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[47.882,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[48.517,"o","k"]
|
||||
[48.616,"o","\u0008k3"]
|
||||
[48.77,"o","d"]
|
||||
[48.968,"o"," "]
|
||||
[50.133,"o","n"]
|
||||
[50.276,"o","o"]
|
||||
[50.33,"o","d"]
|
||||
[50.386,"o","e"]
|
||||
[50.462,"o"," "]
|
||||
[50.605,"o","l"]
|
||||
[50.792,"o","i"]
|
||||
[50.881,"o","s"]
|
||||
[51.046,"o","t"]
|
||||
[51.255,"o","\u001b[?2004l\r\r\n"]
|
||||
[51.266,"o","NAME ROLE CLUSTER\r\nk3d-demo-agent-0 agent "]
|
||||
[51.266,"o","demo\r\nk3d-demo-agent-1 agent demo\r\nk3d-demo-agent-2 agent demo\r\nk3d-demo-server-0 server demo\r\nk3d-demo-server-1 server demo\r\nk3d-demo-server-2 server demo\r\nk3d-demo-serverlb loadbalancer demo\r\n"]
|
||||
[51.267,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"]
|
||||
[51.267,"o","\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[58.525,"o","k"]
|
||||
[58.822,"o","\u0008ku"]
|
||||
[58.999,"o","b"]
|
||||
[59.109,"o","e"]
|
||||
[59.217,"o","c"]
|
||||
[59.48,"o","t"]
|
||||
[59.582,"o","l"]
|
||||
[59.791,"o"," "]
|
||||
[59.876,"o","g"]
|
||||
[59.978,"o","e"]
|
||||
[60.109,"o","t"]
|
||||
[60.285,"o"," "]
|
||||
[60.495,"o","n"]
|
||||
[60.626,"o","o"]
|
||||
[60.725,"o","d"]
|
||||
[60.791,"o","e"]
|
||||
[61,"o","s"]
|
||||
[61.176,"o","\u001b[?2004l\r\r\n"]
|
||||
[61.334,"o","NAME STATUS ROLES AGE VERSION\r\nk3d-demo-server-2 Ready master 22s v1.18.4+k3s1\r\nk3d-demo-agent-2 Ready \u003cnone\u003e 26s v1.18.4+k3s1\r\nk3d-demo-agent-0 Ready \u003cnone\u003e "]
|
||||
[61.334,"o","27s v1.18.4+k3s1\r\nk3d-demo-agent-1 Ready \u003cnone\u003e 27s v1.18.4+k3s1\r\nk3d-demo-server-1 Ready master 19s v1.18.4+k3s1\r\nk3d-demo-server-0 Ready master 32s v1.18.4+k3s1\r\n"]
|
||||
[61.336,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[63.901,"o","k"]
|
||||
[64.08,"o","\u0008k3"]
|
||||
[64.297,"o","d"]
|
||||
[64.464,"o"," "]
|
||||
[64.674,"o","c"]
|
||||
[64.84,"o","l"]
|
||||
[65.126,"o","u"]
|
||||
[65.245,"o","s"]
|
||||
[65.411,"o","t"]
|
||||
[65.553,"o","e"]
|
||||
[65.675,"o","r"]
|
||||
[65.918,"o"," "]
|
||||
[66.18,"o","d"]
|
||||
[66.302,"o","e"]
|
||||
[66.499,"o","l"]
|
||||
[66.687,"o","e"]
|
||||
[66.874,"o","t"]
|
||||
[66.984,"o","e"]
|
||||
[67.794,"o"," "]
|
||||
[67.919,"o","d"]
|
||||
[67.985,"o","e"]
|
||||
[68.084,"o","m"]
|
||||
[68.257,"o","o"]
|
||||
[68.623,"o","\u001b[?2004l\r\r\n"]
|
||||
[68.637,"o","\u001b[36mINFO\u001b[0m[0000] Deleting cluster 'demo' \r\n"]
|
||||
[69.822,"o","\u001b[36mINFO\u001b[0m[0001] Deleted k3d-demo-serverlb \r\n"]
|
||||
[70.309,"o","\u001b[36mINFO\u001b[0m[0001] Deleted k3d-demo-agent-2 \r\n"]
|
||||
[71.017,"o","\u001b[36mINFO\u001b[0m[0002] Deleted k3d-demo-agent-1 \r\n"]
|
||||
[71.645,"o","\u001b[36mINFO\u001b[0m[0003] Deleted k3d-demo-agent-0 \r\n"]
|
||||
[72.274,"o","\u001b[36mINFO\u001b[0m[0003] Deleted k3d-demo-server-2 \r\n"]
|
||||
[73.264,"o","\u001b[36mINFO\u001b[0m[0004] Deleted k3d-demo-server-1 \r\n"]
|
||||
[74.155,"o","\u001b[36mINFO\u001b[0m[0005] Deleted k3d-demo-server-0 \r\n"]
|
||||
[74.155,"o","\u001b[36mINFO\u001b[0m[0005] Deleting cluster network '2324ab59566f5bca41df87cbf7b65e14539cfe9feea1087f143eec45ac91652f' \r\n"]
|
||||
[74.35,"o","\u001b[36mINFO\u001b[0m[0005] Deleting image volume 'k3d-demo-images' \r\n"]
|
||||
[74.36,"o","\u001b[36mINFO\u001b[0m[0005] Removing cluster details from default kubeconfig... \r\n"]
|
||||
[74.362,"o","\u001b[36mINFO\u001b[0m[0005] Removing standalone kubeconfig file (if there is one)... \r\n\u001b[36mINFO\u001b[0m[0005] Successfully deleted cluster demo! \r\n"]
|
||||
[74.363,"o","\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[32mThisCouldBeYou\u001b[0m$ \u001b[K\u001b[?2004h"]
|
||||
[75.795,"o","\u001b[?2004l\r\r\n"]
|
162
docs/static/asciicast/20210917_k3d_v5.0.0_01.cast
vendored
162
docs/static/asciicast/20210917_k3d_v5.0.0_01.cast
vendored
@ -1,162 +0,0 @@
|
||||
{"version": 2, "width": 213, "height": 45, "timestamp": 1631908903, "env": {"SHELL": "bash", "TERM": "xterm-256color"}}
|
||||
[0.018381, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[1.636481, "o", "k"]
|
||||
[1.702291, "o", "3"]
|
||||
[1.835268, "o", "d"]
|
||||
[2.024007, "o", " "]
|
||||
[2.111734, "o", "v"]
|
||||
[2.210891, "o", "e"]
|
||||
[2.343441, "o", "r"]
|
||||
[2.516933, "o", "s"]
|
||||
[2.583471, "o", "i"]
|
||||
[2.773563, "o", "o"]
|
||||
[2.927568, "o", "n"]
|
||||
[3.159219, "o", "\r\n\u001b[?2004l\r"]
|
||||
[3.179508, "o", "k3d version v5.0.0\r\nk3s version v1.21.4-k3s1 (default)\r\n"]
|
||||
[3.180754, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[4.57973, "o", "k"]
|
||||
[4.656235, "o", "3"]
|
||||
[4.763252, "o", "d"]
|
||||
[4.865396, "o", " "]
|
||||
[4.986278, "o", "c"]
|
||||
[5.051494, "o", "l"]
|
||||
[5.238737, "o", "u"]
|
||||
[5.292747, "o", "s"]
|
||||
[5.381595, "o", "t"]
|
||||
[5.503508, "o", "e"]
|
||||
[5.578881, "o", "r"]
|
||||
[5.666704, "o", " "]
|
||||
[5.766742, "o", "c"]
|
||||
[5.962787, "o", "r"]
|
||||
[6.029469, "o", "e"]
|
||||
[6.061464, "o", "a"]
|
||||
[6.184275, "o", "t"]
|
||||
[6.281805, "o", "e"]
|
||||
[6.445508, "o", " "]
|
||||
[6.666863, "o", "-"]
|
||||
[7.20248, "o", "-"]
|
||||
[7.334019, "o", "a"]
|
||||
[7.490134, "o", "g"]
|
||||
[7.566087, "o", "e"]
|
||||
[7.631634, "o", "n"]
|
||||
[7.729597, "o", "t"]
|
||||
[7.897099, "o", "s"]
|
||||
[8.049496, "o", " "]
|
||||
[8.280178, "o", "3"]
|
||||
[8.499599, "o", " "]
|
||||
[8.631147, "o", "d"]
|
||||
[8.707104, "o", "e"]
|
||||
[8.773508, "o", "m"]
|
||||
[8.91407, "o", "o"]
|
||||
[9.113612, "o", "\r\n\u001b[?2004l\r"]
|
||||
[9.132118, "o", "\u001b[36mINFO\u001b[0m[0000] Prep: Network \r\n"]
|
||||
[9.183203, "o", "\u001b[36mINFO\u001b[0m[0000] Created network 'k3d-demo' \r\n"]
|
||||
[9.187229, "o", "\u001b[36mINFO\u001b[0m[0000] Created volume 'k3d-demo-images' \r\n"]
|
||||
[10.187972, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-server-0' \r\n"]
|
||||
[10.281058, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-0' \r\n"]
|
||||
[10.368708, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-1' \r\n"]
|
||||
[10.455282, "o", "\u001b[36mINFO\u001b[0m[0001] Creating node 'k3d-demo-agent-2' \r\n"]
|
||||
[10.536337, "o", "\u001b[36mINFO\u001b[0m[0001] Creating LoadBalancer 'k3d-demo-serverlb' \r\n"]
|
||||
[10.609539, "o", "\u001b[36mINFO\u001b[0m[0001] Using the k3d-tools node to gather environment information \r\n"]
|
||||
[10.628592, "o", "\u001b[36mINFO\u001b[0m[0001] Starting new tools node... \r\n"]
|
||||
[10.702678, "o", "\u001b[36mINFO\u001b[0m[0001] Starting Node 'k3d-demo-tools' \r\n"]
|
||||
[11.394216, "o", "\u001b[36mINFO\u001b[0m[0002] Deleted k3d-demo-tools \r\n"]
|
||||
[11.394427, "o", "\u001b[36mINFO\u001b[0m[0002] Starting cluster 'demo' \r\n\u001b[36mINFO\u001b[0m[0002] Starting servers... \r\n"]
|
||||
[11.404635, "o", "\u001b[36mINFO\u001b[0m[0002] Starting Node 'k3d-demo-server-0' \r\n"]
|
||||
[16.378372, "o", "\u001b[36mINFO\u001b[0m[0007] Starting agents... \r\n"]
|
||||
[16.388922, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-0' \r\n"]
|
||||
[16.389848, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-1' \r\n"]
|
||||
[16.397254, "o", "\u001b[36mINFO\u001b[0m[0007] Starting Node 'k3d-demo-agent-2' \r\n"]
|
||||
[31.590126, "o", "\u001b[36mINFO\u001b[0m[0022] Starting helpers... \r\n"]
|
||||
[31.637947, "o", "\u001b[36mINFO\u001b[0m[0022] Starting Node 'k3d-demo-serverlb' \r\n"]
|
||||
[38.185432, "o", "\u001b[36mINFO\u001b[0m[0029] Trying to get IP of the docker host and inject it into the cluster as 'host.k3d.internal' for easy access \r\n"]
|
||||
[50.256861, "o", "\u001b[36mINFO\u001b[0m[0041] Cluster 'demo' created successfully! \r\n\u001b[36mINFO\u001b[0m[0041] --kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false \r\n"]
|
||||
[50.295453, "o", "\u001b[36mINFO\u001b[0m[0041] You can now use it like this: \r\nkubectl config use-context k3d-demo\r\nkubectl cluster-info\r\n"]
|
||||
[50.299281, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[52.777117, "o", "k"]
|
||||
[52.873341, "o", "3"]
|
||||
[53.006105, "o", "d"]
|
||||
[53.147707, "o", " "]
|
||||
[53.245736, "o", "c"]
|
||||
[53.343772, "o", "l"]
|
||||
[53.551038, "o", "u"]
|
||||
[53.617941, "o", "s"]
|
||||
[53.724853, "o", "t"]
|
||||
[53.878933, "o", "e"]
|
||||
[53.956281, "o", "r"]
|
||||
[54.076303, "o", " "]
|
||||
[54.21845, "o", "l"]
|
||||
[54.339561, "o", "s"]
|
||||
[54.447647, "o", "\r\n\u001b[?2004l\r"]
|
||||
[54.47118, "o", "NAME SERVERS AGENTS LOADBALANCER\r\ndemo 1/1 3/3 true\r\n"]
|
||||
[54.472506, "o", "\u001b[?2004h"]
|
||||
[54.472562, "o", "\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[54.838629, "o", "k"]
|
||||
[54.918551, "o", "3"]
|
||||
[55.015846, "o", "d"]
|
||||
[55.115834, "o", " "]
|
||||
[55.290514, "o", "n"]
|
||||
[55.378089, "o", "o"]
|
||||
[55.454292, "o", "d"]
|
||||
[55.508669, "o", "e"]
|
||||
[55.869687, "o", " "]
|
||||
[56.05605, "o", "l"]
|
||||
[56.176004, "o", "s"]
|
||||
[56.31685, "o", "\r\n\u001b[?2004l\r"]
|
||||
[56.341161, "o", "NAME ROLE CLUSTER STATUS\r\nk3d-demo-agent-0 agent demo running\r\nk3d-demo-agent-1 agent demo running\r\nk3d-demo-agent-2 agent demo running\r\nk3d-demo-server-0 server demo running\r\nk3d-demo-serverlb loadbalancer demo running\r\n"]
|
||||
[56.34231, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[57.733293, "o", "k"]
|
||||
[57.932149, "o", "u"]
|
||||
[58.059135, "o", "b"]
|
||||
[58.137901, "o", "e"]
|
||||
[58.23908, "o", "c"]
|
||||
[58.418996, "o", "t"]
|
||||
[58.496899, "o", "l"]
|
||||
[58.687091, "o", " "]
|
||||
[58.740349, "o", "g"]
|
||||
[58.832322, "o", "e"]
|
||||
[58.955499, "o", "t"]
|
||||
[59.067944, "o", " "]
|
||||
[59.246223, "o", "n"]
|
||||
[59.344781, "o", "o"]
|
||||
[59.426918, "o", "d"]
|
||||
[59.493282, "o", "e"]
|
||||
[59.672248, "o", "s"]
|
||||
[59.772331, "o", "\r\n\u001b[?2004l\r"]
|
||||
[60.41166, "o", "NAME STATUS ROLES AGE VERSION\r\nk3d-demo-agent-2 Ready <none> 29s v1.21.4+k3s1\r\nk3d-demo-server-0 Ready control-plane,master 41s v1.21.4+k3s1\r\nk3d-demo-agent-0 Ready <none> 31s v1.21.4+k3s1\r\nk3d-demo-agent-1 Ready <none> 31s v1.21.4+k3s1\r\n"]
|
||||
[60.414302, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[61.301105, "o", "k"]
|
||||
[61.534792, "o", "u"]
|
||||
[61.723192, "o", "b"]
|
||||
[61.800647, "o", "e"]
|
||||
[61.912191, "o", "c"]
|
||||
[62.111433, "o", "t"]
|
||||
[62.220654, "o", "l"]
|
||||
[62.400417, "o", " "]
|
||||
[62.434071, "o", "g"]
|
||||
[62.523052, "o", "e"]
|
||||
[62.634216, "o", "t"]
|
||||
[62.700412, "o", " "]
|
||||
[62.923073, "o", "p"]
|
||||
[63.120958, "o", "o"]
|
||||
[63.231192, "o", "d"]
|
||||
[63.287011, "o", "s"]
|
||||
[63.497854, "o", " "]
|
||||
[63.642017, "o", "-"]
|
||||
[63.896056, "o", "A"]
|
||||
[64.129633, "o", "\r\n\u001b[?2004l\r"]
|
||||
[64.180813, "o", "NAMESPACE NAME READY STATUS RESTARTS AGE\r\nkube-system coredns-7448499f4d-rrmh5 1/1 Running 0 34s\r\nkube-system metrics-server-86cbb8457f-6hkns 1/1 Running 0 34s\r\nkube-system local-path-provisioner-5ff76fc89d-ltzd4 1/1 Running 0 34s\r\nkube-system helm-install-traefik-crd-st9fm 0/1 Completed 0 34s\r\nkube-system traefik-97b44b794-lgljm 0/1 ContainerCreating 0 11s\r\nkube-system helm-install-traefik-6t7fr 0/1 Completed 1 "]
|
||||
[64.181, "o", "34s\r\nkube-system svclb-traefik-wztvf 2/2 Running 0 11s\r\nkube-system svclb-traefik-ksk54 2/2 Running 0 11s\r\nkube-system svclb-traefik-s286b 2/2 Running 0 11s\r\nkube-system svclb-traefik-ksbmz 2/2 Running 0 11s\r\n"]
|
||||
[64.182931, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[66.050907, "o", "#"]
|
||||
[66.160953, "o", " "]
|
||||
[66.559434, "o", "P"]
|
||||
[66.768444, "o", "r"]
|
||||
[66.844975, "o", "o"]
|
||||
[67.022583, "o", "f"]
|
||||
[67.098851, "o", "i"]
|
||||
[67.286285, "o", "t"]
|
||||
[67.921864, "o", "."]
|
||||
[69.59588, "o", "\r\n\u001b[?2004l\r"]
|
||||
[69.596126, "o", "\u001b[?2004h\u001b]0;ThisCouldBeYou: ~\u0007\u001b[01;32mThisCouldBeYou\u001b[00m:\u001b[01;34m~\u001b[00m$ "]
|
||||
[70.123764, "o", "\u001b[?2004l\r\r\nexit\r\n"]
|
2563
docs/static/css/asciinema-player.css
vendored
2563
docs/static/css/asciinema-player.css
vendored
File diff suppressed because it is too large
Load Diff
50
docs/static/css/extra.css
vendored
50
docs/static/css/extra.css
vendored
@ -1,50 +0,0 @@
|
||||
.md-header__button.md-logo img, .md-header__button.md-logo svg {
|
||||
width: 3rem;
|
||||
}
|
||||
|
||||
.md-header-nav__button.md-logo {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.md-header {
|
||||
height: 3rem;
|
||||
}
|
||||
|
||||
.md-header-nav {
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.md-ellipsis {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
.md-header-nav__topic {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
/* This is equal to light mode */
|
||||
[data-md-color-primary=black] .md-tabs {
|
||||
|
||||
/* Set color of the tab bar */
|
||||
background-color: #0DCEFF;
|
||||
}
|
||||
|
||||
/* Dark Mode */
|
||||
[data-md-color-scheme="slate"] .md-header {
|
||||
/* keep black backgroud of title bar (header) */
|
||||
background-color: black;
|
||||
}
|
||||
|
||||
/* Tab Bar */
|
||||
.md-tabs {
|
||||
color: black;
|
||||
}
|
||||
|
||||
.md-tabs__item {
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
.md-tabs__link--active {
|
||||
text-decoration: underline;
|
||||
}
|
Binary file not shown.
Before Width: | Height: | Size: 8.2 KiB |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user