Merge branch 'master' of https://github.com/rancher/k3d into feature/add-node-command

iwilltry42 2020-01-02 14:42:12 +01:00
commit 6e05342812
7 changed files with 160 additions and 19 deletions

.gitignore

@@ -17,3 +17,4 @@ _dist/
# Editors
.vscode/
.local/


@@ -46,6 +46,7 @@ or...
Check out what you can do via `k3d help`
Example Workflow: Create a new cluster and use it with `kubectl`
(*Note:* `kubectl` is not part of `k3d`, so you have to [install it first if needed](https://kubernetes.io/docs/tasks/tools/install-kubectl/))
1. `k3d create` to create a new single-node cluster (docker container)
2. `export KUBECONFIG=$(k3d get-kubeconfig)` to make `kubectl` use the kubeconfig for that cluster


@@ -147,13 +147,18 @@ func createKubeConfigFile(cluster string) error {
// set the host name to remote docker machine's IP address.
//
// Otherwise, the hostname remains as 'localhost'
//
// Additionally, we replace every occurrence of 'default' in the kubeconfig with the actual cluster name
apiHost := server[0].Labels["apihost"]
if apiHost != "" {
s := string(trimBytes)
s = strings.ReplaceAll(s, "default", cluster)
if apiHost != "" {
s = strings.Replace(s, "localhost", apiHost, 1)
trimBytes = []byte(s)
s = strings.Replace(s, "127.0.0.1", apiHost, 1)
}
trimBytes = []byte(s)
_, err = kubeconfigfile.Write(trimBytes)
if err != nil {
return fmt.Errorf("Couldn't write to kubeconfig.yaml\n%+v", err)


@@ -152,6 +152,9 @@ func CreateCluster(c *cli.Context) error {
* Add user-supplied arguments for the k3s agent
*/
if c.IsSet("agent-arg") {
if c.Int("workers") < 1 {
log.Warnln("--agent-arg supplied, but --workers is 0, so no agents will be created")
}
k3AgentArgs = append(k3AgentArgs, c.StringSlice("agent-arg")...)
}
@@ -239,7 +242,7 @@ func CreateCluster(c *cli.Context) error {
// We're simply scanning the container logs for a line that tells us that everything's up and running
// TODO: also wait for worker nodes
if c.IsSet("wait") {
if err := waitForContainerLogMessage(serverContainerID, "Running kubelet", c.Int("wait")); err != nil {
if err := waitForContainerLogMessage(serverContainerID, "Wrote kubeconfig", c.Int("wait")); err != nil {
deleteCluster()
return fmt.Errorf("ERROR: failed while waiting for server to come up\n%+v", err)
}
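
For context, `--wait` works by following the server container's log stream until the expected readiness message shows up. Below is a minimal, hedged sketch of how such a wait can be built with the Docker Go SDK; the name `waitForLogMessage` is assumed for illustration, and this is not k3d's actual `waitForContainerLogMessage`.

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"log"
	"os"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// waitForLogMessage follows a container's log stream and returns once the
// given substring appears, or errors out when the timeout elapses.
func waitForLogMessage(containerID, message string, timeoutSeconds int) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return fmt.Errorf("couldn't create docker client: %w", err)
	}

	// Stream stdout+stderr; the stream may be multiplexed, but matching a
	// substring per scanned line is good enough for this purpose.
	logs, err := docker.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
	})
	if err != nil {
		return fmt.Errorf("couldn't get logs for container %s: %w", containerID, err)
	}
	defer logs.Close()

	scanner := bufio.NewScanner(logs)
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), message) {
			return nil // readiness message found
		}
	}
	// the expired context closes the stream, which ends the scan loop
	return fmt.Errorf("timed out waiting for %q in logs of %s", message, containerID)
}

func main() {
	// usage: go run . <container-id>
	if err := waitForLogMessage(os.Args[1], "Wrote kubeconfig", 60); err != nil {
		log.Fatal(err)
	}
}
```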
@@ -277,6 +280,7 @@ kubectl cluster-info`, os.Args[0], c.String("name"))
// DeleteCluster removes the containers belonging to a cluster and its local directory
func DeleteCluster(c *cli.Context) error {
clusters, err := getClusters(c.Bool("all"), c.String("name"))
if err != nil {
@@ -284,6 +288,9 @@ func DeleteCluster(c *cli.Context) error {
}
if len(clusters) == 0 {
if !c.IsSet("all") && !c.IsSet("name") {
return fmt.Errorf("No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to delete other clusters)", c.String("name"))
}
return fmt.Errorf("No cluster(s) found")
}
@@ -416,6 +423,9 @@ func GetKubeConfig(c *cli.Context) error {
}
if len(clusters) == 0 {
if !c.IsSet("all") && !c.IsSet("name") {
return fmt.Errorf("No cluster with name '%s' found (You can add `--all` and `--name <CLUSTER-NAME>` to check other clusters)", c.String("name"))
}
return fmt.Errorf("No cluster(s) found")
}


@@ -179,6 +179,7 @@ func createWorker(spec *ClusterSpec, postfix int) (string, error) {
},
PortBindings: workerPublishedPorts.PortBindings,
Privileged: true,
Init: &[]bool{true}[0],
}
if spec.AutoRestart {
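
A note on the added `Init: &[]bool{true}[0]` line: the Docker API's `HostConfig.Init` field is a `*bool`, and Go doesn't allow taking the address of a literal directly, so the one-element-slice trick is used to obtain a pointer to `true` (which tells Docker to run its init process inside the container). A hedged sketch with a hypothetical `boolPtr` helper (not part of k3d) shows the equivalent, more readable form:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

// boolPtr returns a pointer to b; hypothetical helper, not part of k3d.
func boolPtr(b bool) *bool { return &b }

func main() {
	hostConfig := &container.HostConfig{
		Privileged: true,
		Init:       boolPtr(true), // equivalent to &[]bool{true}[0]
	}
	fmt.Println(*hostConfig.Init) // prints: true
}
```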


@@ -84,16 +84,16 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh
`curl localhost:8082/`
## Connect with a local insecure registry
## Connect with a private insecure registry
This guide takes you through setting up a local insecure (http) registry and integrating it into your workflow so that:
This guide takes you through setting up a private insecure (http) registry and integrating it into your workflow so that:
- you can push to the registry from your host
- the cluster managed by k3d can pull from that registry
The registry will be named `registry.local` and run on port `5000`.
### Create the registry
### Step 1: Create the registry
<pre>
docker volume create local_registry
@@ -101,11 +101,79 @@ docker volume create local_registry
docker container run -d --name <b>registry.local</b> -v local_registry:/var/lib/registry --restart always -p <b>5000:5000</b> registry:2
</pre>
### Create the cluster with k3d
### Step 2: Prepare configuration to connect to the registry
First we need a place to store the config template: `mkdir -p /home/${USER}/.k3d`
First we need a place to store the config template: `mkdir -p ${HOME}/.k3d`
Create a file named `config.toml.tmpl` in `/home/${USER}/.k3d`, with the following content:
#### Step 2 - Option 1: use `registries.yaml` (for k3s >= v0.10.0)
Create a file named `registries.yaml` in `${HOME}/.k3d` with the following content:
```yaml
mirrors:
  "registry.local:5000":
    endpoint:
      - http://registry.local:5000
```
#### Step 2 - Option 2: use `config.toml.tmpl` to directly modify the containerd config (all versions)
Create a file named `config.toml.tmpl` in `${HOME}/.k3d`, with the following content:
##### Step 2 - Option 2.1 -> for k3s >= v0.10.0
<pre>
[plugins.opt]
path = "{{ .NodeConfig.Containerd.Opt }}"
[plugins.cri]
stream_server_address = "127.0.0.1"
stream_server_port = "10010"
{{- if .IsRunningInUserNS }}
disable_cgroup = true
disable_apparmor = true
restrict_oom_score_adj = true
{{end}}
{{- if .NodeConfig.AgentConfig.PauseImage }}
sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"
{{end}}
{{- if not .NodeConfig.NoFlannel }}
[plugins.cri.cni]
bin_dir = "{{ .NodeConfig.AgentConfig.CNIBinDir }}"
conf_dir = "{{ .NodeConfig.AgentConfig.CNIConfDir }}"
{{end}}
[plugins.cri.containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
{{ if .PrivateRegistryConfig }}
{{ if .PrivateRegistryConfig.Mirrors }}
[plugins.cri.registry.mirrors]{{end}}
{{range $k, $v := .PrivateRegistryConfig.Mirrors }}
[plugins.cri.registry.mirrors."{{$k}}"]
endpoint = [{{range $i, $j := $v.Endpoints}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}]
{{end}}
{{range $k, $v := .PrivateRegistryConfig.Configs }}
{{ if $v.Auth }}
[plugins.cri.registry.configs."{{$k}}".auth]
{{ if $v.Auth.Username }}username = "{{ $v.Auth.Username }}"{{end}}
{{ if $v.Auth.Password }}password = "{{ $v.Auth.Password }}"{{end}}
{{ if $v.Auth.Auth }}auth = "{{ $v.Auth.Auth }}"{{end}}
{{ if $v.Auth.IdentityToken }}identity_token = "{{ $v.Auth.IdentityToken }}"{{end}}
{{end}}
{{ if $v.TLS }}
[plugins.cri.registry.configs."{{$k}}".tls]
{{ if $v.TLS.CAFile }}ca_file = "{{ $v.TLS.CAFile }}"{{end}}
{{ if $v.TLS.CertFile }}cert_file = "{{ $v.TLS.CertFile }}"{{end}}
{{ if $v.TLS.KeyFile }}key_file = "{{ $v.TLS.KeyFile }}"{{end}}
{{end}}
{{end}}
{{end}}
# Added section: additional registries and the endpoints
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."<b>registry.local:5000</b>"]
endpoint = ["http://<b>registry.local:5000</b>"]
</pre>
##### Step 2 - Option 2.2 -> for k3s <= v0.9.1
<pre>
# Original section: no changes
@@ -134,23 +202,28 @@ sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"
endpoint = ["http://<b>registry.local:5000</b>"]
</pre>
Finally, start a cluster with k3d, passing in the config template:
### Step 3: Start the cluster
Finally, start a cluster with k3d, passing in the `registries.yaml` or `config.toml.tmpl`:
```bash
CLUSTER_NAME=k3s-default
k3d create \
--name ${CLUSTER_NAME} \
--wait 0 \
--auto-restart \
--volume /home/${USER}/.k3d/config.toml.tmpl:/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
--volume ${HOME}/.k3d/registries.yaml:/etc/rancher/k3s/registries.yaml
```
### Wire them up
or
```bash
k3d create \
--volume ${HOME}/.k3d/config.toml.tmpl:/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
```
### Step 4: Wire them up
- Connect the registry to the cluster network: `docker network connect k3d-k3s-default registry.local`
- Add `127.0.0.1 registry.local` to your `/etc/hosts`
### Test
### Step 5: Test
Push an image to the registry:
@@ -190,6 +263,48 @@ EOF
... and check that the pod is running: `kubectl get pods -l "app=nginx-test-registry"`
## Connect with a private secure registry
This guide takes you through setting up a private secure (https) registry with a non-publicly trusted CA and integrating it into your workflow so that:
- you can push to the registry
- the cluster managed by k3d can pull from that registry
The registry will be named `registry.companyinternal.net` and it is assumed to already be set up, with a non-publicly trusted cert.
### Step 1: Prepare configuration to connect to the registry
First we need a place to store the config template: `mkdir -p ${HOME}/.k3d`
### Step 2: Configure `registries.yaml` (for k3s >= v0.10.0) to point to your root CA
Create a file named `registries.yaml` in `${HOME}/.k3d` with the following content:
```yaml
mirrors:
  registry.companyinternal.net:
    endpoint:
      - https://registry.companyinternal.net
configs:
  registry.companyinternal.net:
    tls:
      ca_file: "/etc/ssl/certs/companycaroot.pem"
```
### Step 3: Get a copy of the root CA
Download it to `${HOME}/.k3d/companycaroot.pem`
### Step 4: Start the cluster
Finally, start a cluster with k3d, passing in the `registries.yaml` and root CA cert:
```bash
k3d create \
--volume ${HOME}/.k3d/registries.yaml:/etc/rancher/k3s/registries.yaml \
--volume ${HOME}/.k3d/companycaroot.pem:/etc/ssl/certs/companycaroot.pem
```
## Running on filesystems k3s doesn't like (btrfs, tmpfs, …)
The following script leverages a [Docker loopback volume plugin](https://github.com/ashald/docker-volume-loopback) to mask the problematic filesystem away from k3s by providing a small ext4 filesystem underneath `/var/lib/rancher/k3s` (k3s' data dir).


@@ -3,3 +3,11 @@
- As [@jaredallard](https://github.com/jaredallard) [pointed out](https://github.com/rancher/k3d/pull/48), people running `k3d` on a system with **btrfs** may need to mount `/dev/mapper` into the nodes for the setup to work.
- This will do: `k3d create -v /dev/mapper:/dev/mapper`
- An additional solution proposed by [@zer0def](https://github.com/zer0def) can be found in the [examples section](examples.md) (_Running on filesystems k3s doesn't like (btrfs, tmpfs, …)_)
- Pods go to evicted state after doing X
- Related issues: [#133 - Pods evicted due to `NodeHasDiskPressure`](https://github.com/rancher/k3d/issues/133) (collection of #119 and #130)
- Background: somehow docker runs out of space for the k3d node containers, which triggers a hard eviction in the kubelet
- Possible [fix/workaround by @zer0def](https://github.com/rancher/k3d/issues/133#issuecomment-549065666):
- use a docker storage driver which cleans up properly (e.g. overlay2)
- clean up or expand docker root filesystem
- change the kubelet's eviction thresholds upon cluster creation: `k3d create --agent-arg '--eviction-hard=imagefs.available<1%,nodefs.available<1%' --agent-arg '--eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`