add some comments

iwilltry42 2019-04-29 08:19:01 +02:00
parent 705ef69dd9
commit 942e667b24
5 changed files with 60 additions and 18 deletions


@@ -1,5 +1,9 @@
 package run
 
+/*
+ * This file contains the "backend" functionality for the CLI commands (and flags)
+ */
+
 import (
     "bytes"
     "context"
@@ -19,7 +23,7 @@ import (
     "github.com/urfave/cli"
 )
 
-// CheckTools checks if the installed tools work correctly
+// CheckTools checks if the docker API server is responding
 func CheckTools(c *cli.Context) error {
     log.Print("Checking docker...")
     ctx := context.Background()
@@ -67,7 +71,7 @@ func CreateCluster(c *cli.Context) error {
         k3sServerArgs = append(k3sServerArgs, c.StringSlice("server-arg")...)
     }
 
-    // let's go
+    // create the server
     log.Printf("Creating cluster [%s]", c.String("name"))
     dockerID, err := createServer(
         c.GlobalBool("verbose"),
@@ -88,10 +92,13 @@ func CreateCluster(c *cli.Context) error {
         return err
     }
 
-    // wait for k3s to be up and running if we want it
+    // Wait for k3s to be up and running if wanted.
+    // We're simply scanning the container logs for a line that tells us that everything's up and running
+    // TODO: also wait for worker nodes
     start := time.Now()
     timeout := time.Duration(c.Int("timeout")) * time.Second
     for c.IsSet("wait") {
+        // not running after timeout exceeded? Rollback and delete everything.
         if timeout != 0 && !time.Now().After(start.Add(timeout)) {
             err := DeleteCluster(c)
             if err != nil {
@@ -100,6 +107,7 @@ func CreateCluster(c *cli.Context) error {
             return errors.New("Cluster creation exceeded specified timeout")
         }
 
+        // scan container logs for a line that tells us that the required services are up and running
         out, err := docker.ContainerLogs(ctx, dockerID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
         if err != nil {
             out.Close()
@@ -116,9 +124,12 @@ func CreateCluster(c *cli.Context) error {
         time.Sleep(1 * time.Second)
     }
 
+    // create the directory where we will put the kubeconfig file by default (when running `k3d get-config`)
+    // TODO: this can probably be moved to `k3d get-config` or be removed in a different approach
     createClusterDir(c.String("name"))
 
-    // worker nodes
+    // spin up the worker nodes
+    // TODO: do this concurrently in different goroutines
     if c.Int("workers") > 0 {
         k3sWorkerArgs := []string{}
         env := []string{k3sClusterSecret}
@@ -150,7 +161,7 @@ kubectl cluster-info`, os.Args[0], c.String("name"))
     return nil
 }
 
-// DeleteCluster removes the cluster container and its cluster directory
+// DeleteCluster removes the containers belonging to a cluster and its local directory
 func DeleteCluster(c *cli.Context) error {
 
     // operate on one or all clusters
@@ -177,6 +188,7 @@ func DeleteCluster(c *cli.Context) error {
     for _, cluster := range clusters {
         log.Printf("Removing cluster [%s]", cluster.name)
         if len(cluster.workers) > 0 {
+            // TODO: this could be done in goroutines
             log.Printf("...Removing %d workers\n", len(cluster.workers))
             for _, worker := range cluster.workers {
                 if err := removeContainer(worker.ID); err != nil {
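
The comments added above describe the readiness check: poll the server container's logs until a marker line shows up, and give up (rolling the cluster back) once the timeout is exceeded. Below is a minimal, compilable sketch of that polling pattern using the same Docker client calls as the diff; waitForLogLine and the marker argument are illustrative names, not part of k3d, and the sketch only returns an error on timeout instead of deleting the cluster.

package sketch

import (
    "bytes"
    "context"
    "fmt"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
)

// waitForLogLine polls the logs of a container until `marker` appears or
// `timeout` (0 = wait forever) has elapsed, checking once per second.
func waitForLogLine(containerID, marker string, timeout time.Duration) error {
    ctx := context.Background()
    docker, err := client.NewEnvClient()
    if err != nil {
        return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
    }

    start := time.Now()
    for {
        // timeout exceeded? give up (k3d additionally rolls back via DeleteCluster here)
        if timeout != 0 && time.Now().After(start.Add(timeout)) {
            return fmt.Errorf("container %s didn't log %q within %s", containerID, marker, timeout)
        }

        // fetch the full log output and scan it for the marker line
        out, err := docker.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
        if err != nil {
            return fmt.Errorf("ERROR: couldn't get logs of container %s\n%+v", containerID, err)
        }
        buf := new(bytes.Buffer)
        _, _ = buf.ReadFrom(out) // best effort: a short read just means we scan again next round
        out.Close()

        if bytes.Contains(buf.Bytes(), []byte(marker)) {
            return nil // the required services are up and running
        }
        time.Sleep(1 * time.Second)
    }
}

Which exact log line to scan for is a k3d implementation detail that this diff doesn't show; the sketch just takes it as an argument.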


@@ -1,5 +1,10 @@
 package run
 
+/*
+ * The functions in this file take care of spinning up the
+ * k3s server and worker containers as well as deleting them.
+ */
+
 import (
     "context"
     "fmt"
@@ -17,6 +22,7 @@ import (
     "github.com/docker/docker/client"
 )
 
+// createServer creates and starts a k3s server container
 func createServer(verbose bool, image string, port string, args []string, env []string, name string, volumes []string) (string, error) {
     log.Printf("Creating server using %s...\n", image)
     ctx := context.Background()
@@ -24,6 +30,8 @@ func createServer(verbose bool, image string, port string, args []string, env []
     if err != nil {
         return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
     }
+
+    // pull the required docker image
     reader, err := docker.ImagePull(ctx, image, types.ImagePullOptions{})
     if err != nil {
         return "", fmt.Errorf("ERROR: couldn't pull image %s\n%+v", image, err)
@@ -40,6 +48,7 @@ func createServer(verbose bool, image string, port string, args []string, env []
         }
     }
 
+    // configure container options (host/network configuration, labels, env vars, etc.)
     containerLabels := make(map[string]string)
     containerLabels["app"] = "k3d"
     containerLabels["component"] = "server"
@@ -74,6 +83,7 @@ func createServer(verbose bool, image string, port string, args []string, env []
         },
     }
 
+    // create the container
     resp, err := docker.ContainerCreate(ctx, &container.Config{
         Image: image,
         Cmd:   append([]string{"server"}, args...),
@@ -87,6 +97,7 @@ func createServer(verbose bool, image string, port string, args []string, env []
         return "", fmt.Errorf("ERROR: couldn't create container %s\n%+v", containerName, err)
     }
 
+    // start the container
     if err := docker.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
         return "", fmt.Errorf("ERROR: couldn't start container %s\n%+v", containerName, err)
     }
@@ -95,6 +106,7 @@ func createServer(verbose bool, image string, port string, args []string, env []
 }
 
+// createWorker creates/starts a k3s agent node that connects to the server
 func createWorker(verbose bool, image string, args []string, env []string, name string, volumes []string, postfix string, serverPort string) (string, error) {
 
     ctx := context.Background()
     docker, err := client.NewEnvClient()
@@ -102,6 +114,7 @@ func createWorker(verbose bool, image string, args []string, env []string, name
         return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
     }
 
+    // pull the required docker image
     reader, err := docker.ImagePull(ctx, image, types.ImagePullOptions{})
     if err != nil {
         return "", fmt.Errorf("ERROR: couldn't pull image %s\n%+v", image, err)
@@ -113,6 +126,7 @@ func createWorker(verbose bool, image string, args []string, env []string, name
         }
     }
 
+    // configure container options (host/network configuration, labels, env vars, etc.)
     containerLabels := make(map[string]string)
     containerLabels["app"] = "k3d"
     containerLabels["component"] = "worker"
@@ -143,6 +157,7 @@ func createWorker(verbose bool, image string, args []string, env []string, name
         },
     }
 
+    // create the container
     resp, err := docker.ContainerCreate(ctx, &container.Config{
         Image: image,
         Env:   env,
@@ -152,6 +167,7 @@ func createWorker(verbose bool, image string, args []string, env []string, name
         return "", fmt.Errorf("ERROR: couldn't create container %s\n%+v", containerName, err)
     }
 
+    // start the container
     if err := docker.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
         return "", fmt.Errorf("ERROR: couldn't start container %s\n%+v", containerName, err)
     }
@@ -159,13 +175,20 @@ func createWorker(verbose bool, image string, args []string, env []string, name
     return resp.ID, nil
 }
 
+// removeContainer tries to rm a container, selected by Docker ID, and does a rm -f if it fails (e.g. if container is still running)
 func removeContainer(ID string) error {
+    // TODO: first check if container is running, then try to stop it with a timeout before trying to remove it
+    // if it does not terminate gracefully, try a force remove
+
     ctx := context.Background()
     docker, err := client.NewEnvClient()
     if err != nil {
         return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
     }
+
+    // first, try a soft remove
     if err := docker.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{}); err != nil {
+        // if soft remove didn't succeed, force remove the container
         log.Printf("WARNING: couldn't delete container [%s], trying a force remove now.", ID)
         if err := docker.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{Force: true}); err != nil {
             return fmt.Errorf("FAILURE: couldn't delete container [%s] -> %+v", ID, err)
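
The TODO on removeContainer describes a gentler teardown: check whether the container is running, stop it with a timeout, and only then remove it, falling back to a force remove. A rough sketch of that sequence against the same 2019-era Docker Go client API used in the diff (NewEnvClient, *time.Duration stop timeout); removeContainerGracefully and the 10-second grace period are made up for illustration, not k3d code.

package sketch

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
)

// removeContainerGracefully stops a running container with a timeout before
// removing it, and falls back to a force remove (docker rm -f) if needed.
func removeContainerGracefully(ID string) error {
    ctx := context.Background()
    docker, err := client.NewEnvClient()
    if err != nil {
        return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
    }

    // only bother stopping the container if it is actually running
    info, err := docker.ContainerInspect(ctx, ID)
    if err != nil {
        return fmt.Errorf("ERROR: couldn't inspect container [%s]\n%+v", ID, err)
    }
    if info.State != nil && info.State.Running {
        stopTimeout := 10 * time.Second // grace period before docker sends SIGKILL
        if err := docker.ContainerStop(ctx, ID, &stopTimeout); err != nil {
            log.Printf("WARNING: couldn't stop container [%s], removing it anyway\n%+v", ID, err)
        }
    }

    // soft remove first ...
    if err := docker.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{}); err != nil {
        log.Printf("WARNING: couldn't delete container [%s], trying a force remove now.", ID)
        // ... then force remove if the container wouldn't go quietly
        if err := docker.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{Force: true}); err != nil {
            return fmt.Errorf("FAILURE: couldn't delete container [%s] -> %+v", ID, err)
        }
    }
    return nil
}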


@@ -11,6 +11,8 @@ import (
     "github.com/docker/docker/client"
 )
 
+// createClusterNetwork creates a docker network for a cluster that will be used
+// to let the server and worker containers communicate with each other easily.
 func createClusterNetwork(clusterName string) (string, error) {
     ctx := context.Background()
     docker, err := client.NewEnvClient()
@@ -18,6 +20,7 @@ func createClusterNetwork(clusterName string) (string, error) {
         return "", fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
     }
 
+    // create the network with a set of labels and the cluster name as network name
     resp, err := docker.NetworkCreate(ctx, clusterName, types.NetworkCreate{
         Labels: map[string]string{
             "app": "k3d",
@@ -31,6 +34,7 @@ func createClusterNetwork(clusterName string) (string, error) {
     return resp.ID, nil
 }
 
+// deleteClusterNetwork deletes a docker network based on the name of a cluster it belongs to
 func deleteClusterNetwork(clusterName string) error {
     ctx := context.Background()
     docker, err := client.NewEnvClient()
@@ -49,6 +53,7 @@ func deleteClusterNetwork(clusterName string) error {
         return fmt.Errorf("ERROR: couldn't find network for cluster %s\n%+v", clusterName, err)
     }
 
+    // there should be only one network that matches the name... but who knows?
     for _, network := range networks {
         if err := docker.NetworkRemove(ctx, network.ID); err != nil {
             log.Printf("WARNING: couldn't remove network for cluster %s\n%+v", clusterName, err)
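
According to the new comments, deleteClusterNetwork looks the network up by cluster name and removes every match, because the lookup could in principle return more than one network. A standalone sketch of that lookup-and-remove pattern follows; filtering on the app=k3d label (taken from createClusterNetwork above) plus the network name is an assumption about how the lookup might be expressed, not necessarily what k3d does.

package sketch

import (
    "context"
    "fmt"
    "log"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

// deleteNetworksForCluster removes every docker network labelled app=k3d whose
// name matches clusterName. There should be only one match... but who knows?
func deleteNetworksForCluster(clusterName string) error {
    ctx := context.Background()
    docker, err := client.NewEnvClient()
    if err != nil {
        return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
    }

    // narrow the listing down to networks created for this cluster
    args := filters.NewArgs()
    args.Add("label", "app=k3d")
    args.Add("name", clusterName)

    networks, err := docker.NetworkList(ctx, types.NetworkListOptions{Filters: args})
    if err != nil {
        return fmt.Errorf("ERROR: couldn't find network for cluster %s\n%+v", clusterName, err)
    }

    // remove all matches, logging (but not aborting on) individual failures
    for _, network := range networks {
        if err := docker.NetworkRemove(ctx, network.ID); err != nil {
            log.Printf("WARNING: couldn't remove network %s for cluster %s\n%+v", network.ID, clusterName, err)
        }
    }
    return nil
}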


@@ -16,6 +16,7 @@ const (
 var src = rand.NewSource(time.Now().UnixNano())
 
 // GenerateRandomString thanks to https://stackoverflow.com/a/31832326/6450189
+// GenerateRandomString is used to generate a random string that is used as a cluster secret
 func GenerateRandomString(n int) string {
 
     sb := strings.Builder{}
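
The new comment clarifies that GenerateRandomString supplies the cluster secret. The Stack Overflow answer it credits builds the string by drawing one 63-bit value from the rand.Source and slicing it into 6-bit indices into the character set, so each Int63() call yields up to ten characters. A condensed sketch of that technique follows; the charset and constants are assumptions, since the const block isn't shown in this diff.

package sketch

import (
    "math/rand"
    "strings"
    "time"
)

const (
    letterBytes   = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    letterIdxBits = 6                    // 6 bits are enough to index 62 characters
    letterIdxMask = 1<<letterIdxBits - 1 // keep only the lowest 6 bits
    letterIdxMax  = 63 / letterIdxBits   // 6-bit chunks that fit into one Int63 value
)

var src = rand.NewSource(time.Now().UnixNano())

// randomString returns an n-character string drawn from letterBytes, reusing
// each Int63() value for up to letterIdxMax characters before drawing again.
func randomString(n int) string {
    sb := strings.Builder{}
    sb.Grow(n)
    for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
        if remain == 0 {
            cache, remain = src.Int63(), letterIdxMax
        }
        if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
            sb.WriteByte(letterBytes[idx])
            i--
        }
        cache >>= letterIdxBits
        remain--
    }
    return sb.String()
}

math/rand matches what the diff shows; since the value acts as a shared cluster secret, crypto/rand would be the more conservative choice if predictability ever became a concern.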

main.go

@@ -41,10 +41,10 @@ func main() {
             Action: run.CheckTools,
         },
         {
-            // create creates a new k3s cluster in a container
+            // create creates a new k3s cluster in docker containers
             Name:    "create",
             Aliases: []string{"c"},
-            Usage:   "Create a single node k3s cluster in a container",
+            Usage:   "Create a single- or multi-node k3s cluster in docker containers",
             Flags: []cli.Flag{
                 cli.StringFlag{
                     Name: "name, n",
@@ -53,7 +53,7 @@ func main() {
                 },
                 cli.StringFlag{
                     Name:  "volume, v",
-                    Usage: "Mount one or more volumes into the cluster node (Docker notation: `source:destination[,source:destination]`)",
+                    Usage: "Mount one or more volumes into every node of the cluster (Docker notation: `source:destination[,source:destination]`)",
                 },
                 cli.StringFlag{
                     Name: "version",
@@ -63,7 +63,7 @@ func main() {
                 cli.IntFlag{
                     Name:  "port, p",
                     Value: 6443,
-                    Usage: "Set a port on which the ApiServer will listen",
+                    Usage: "Map the Kubernetes ApiServer port to a local port",
                 },
                 cli.IntFlag{
                     Name: "timeout, t",
@@ -72,7 +72,7 @@ func main() {
                 },
                 cli.BoolFlag{
                     Name:  "wait, w",
-                    Usage: "Wait for the cluster to come up",
+                    Usage: "Wait for the cluster to come up before returning",
                 },
                 cli.StringSliceFlag{
                     Name: "server-arg, x",
@@ -103,7 +103,7 @@ func main() {
                 },
                 cli.BoolFlag{
                     Name:  "all, a",
-                    Usage: "delete all existing clusters (this ignores the --name/-n flag)",
+                    Usage: "Delete all existing clusters (this ignores the --name/-n flag)",
                 },
             },
             Action: run.DeleteCluster,
@@ -116,11 +116,11 @@ func main() {
                 cli.StringFlag{
                     Name:  "name, n",
                     Value: "k3s_default",
-                    Usage: "name of the cluster",
+                    Usage: "Name of the cluster",
                 },
                 cli.BoolFlag{
                     Name:  "all, a",
-                    Usage: "stop all running clusters (this ignores the --name/-n flag)",
+                    Usage: "Stop all running clusters (this ignores the --name/-n flag)",
                 },
             },
             Action: run.StopCluster,
@@ -133,11 +133,11 @@ func main() {
                 cli.StringFlag{
                     Name:  "name, n",
                     Value: "k3s_default",
-                    Usage: "name of the cluster",
+                    Usage: "Name of the cluster",
                 },
                 cli.BoolFlag{
                     Name:  "all, a",
-                    Usage: "start all stopped clusters (this ignores the --name/-n flag)",
+                    Usage: "Start all stopped clusters (this ignores the --name/-n flag)",
                 },
             },
             Action: run.StartCluster,
@@ -150,7 +150,7 @@ func main() {
             Flags: []cli.Flag{
                 cli.BoolFlag{
                     Name:  "all, a",
-                    Usage: "also show non-running clusters",
+                    Usage: "Also show non-running clusters",
                 },
             },
             Action: run.ListClusters,
@@ -163,17 +163,18 @@ func main() {
                 cli.StringFlag{
                     Name:  "name, n",
                     Value: "k3s_default",
-                    Usage: "name of the cluster",
+                    Usage: "Name of the cluster",
                 },
                 cli.BoolFlag{
                     Name:  "all, a",
-                    Usage: "get kubeconfig for all clusters (this ignores the --name/-n flag)",
+                    Usage: "Get kubeconfig for all clusters (this ignores the --name/-n flag)",
                 },
             },
             Action: run.GetKubeConfig,
         },
     }
 
+    // Global flags
     app.Flags = []cli.Flag{
         cli.BoolFlag{
             Name: "verbose",