Merge pull request #300 from rancher/enhancement/noun-verb-syntax

[Enhancement] NEW noun verb syntax

Commit: 26cd8bbb3f
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 4 changes)
@@ -10,7 +10,7 @@ assignees: ''
 **What did you do?**

 - How was the cluster created?
-  - `k3d create -x A -y B`
+  - `k3d cluster create -x A -y B`

 - What did you do afterwards?
   - k3d commands?
@@ -31,7 +31,7 @@ If applicable, add screenshots or terminal output (code block) to help explain y

 **Which version of `k3d`?**

-- output of `k3d --version`
+- output of `k3d version`

 **Which version of docker?**

.github/ISSUE_TEMPLATE/feature_request.md (vendored, 5 changes)
@@ -15,8 +15,9 @@ Please link to the issue/PR here and explain how your request is related to it.

 Do you need...

-- a new command (next to e.g. `create`, `delete`, etc. used via `k3d <your-command>`)?
-- a new flag for a command (e.g. `k3d create --<your-flag>`)?
+- a new noun (next to e.g. `cluster`, `node`, etc. used via `k3d <noun>`)?
+- a new verb (next to e.g. `cluster create`, `node start`, etc. used via `k3d <noun> <verb>`)
+- a new flag for a command (e.g. `k3d cluster create --<your-flag>`)?
   - which command?
 - different functionality for an existing command/flag
   - which command or flag?
Makefile (2 changes)
@@ -68,7 +68,7 @@ GO_SRC += $(foreach dir,$(REC_DIRS),$(shell find $(dir) -name "*.go"))
 ########## Required Tools ##########
 # Go Package required
 PKG_GOX := github.com/mitchellh/gox@v1.0.1
-PKG_GOLANGCI_LINT_VERSION := 1.25.0
+PKG_GOLANGCI_LINT_VERSION := 1.28.3
 PKG_GOLANGCI_LINT_SCRIPT := https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
 PKG_GOLANGCI_LINT := github.com/golangci/golangci-lint/cmd/golangci-lint@v${PKG_GOLANGCI_LINT_VERSION}

@@ -73,10 +73,10 @@ Check out what you can do via `k3d help` or check the docs @ [k3d.io](https://k3

 Example Workflow: Create a new cluster and use it with `kubectl`

-1. `k3d create cluster CLUSTER_NAME` to create a new single-node cluster (= 1 container running k3s)
-2. `k3d get kubeconfig CLUSTER_NAME --switch` to update your default kubeconfig and switch the current-context to the new one
+1. `k3d cluster create CLUSTER_NAME` to create a new single-node cluster (= 1 container running k3s + 1 loadbalancer container)
+2. `k3d kubeconfig merge CLUSTER_NAME --switch-context` to update your default kubeconfig and switch the current-context to the new one
 3. execute some commands like `kubectl get pods --all-namespaces`
-4. `k3d delete cluster CLUSTER_NAME` to delete the default cluster
+4. `k3d cluster delete CLUSTER_NAME` to delete the default cluster

 ## Connect
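Spelled out as one shell session, the updated README workflow reads as follows (the cluster name `mycluster` is just an illustrative placeholder; all commands and flags are taken from the diff itself):

```bash
# create a single-node cluster: 1 k3s server container + 1 loadbalancer container
k3d cluster create mycluster

# merge the cluster's kubeconfig into the default one and switch the current-context
k3d kubeconfig merge mycluster --switch-context

# talk to the cluster
kubectl get pods --all-namespaces

# tear everything down again
k3d cluster delete mycluster
```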
@@ -90,4 +90,4 @@ This repository is based on [@zeerorg](https://github.com/zeerorg/)'s [zeerorg/k

 ## Related Projects

-* [k3x](https://github.com/inercia/k3x): a graphics interface (for Linux) to k3d.
+- [k3x](https://github.com/inercia/k3x): a graphics interface (for Linux) to k3d.
@@ -19,22 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package delete
+package cluster

 import (
     log "github.com/sirupsen/logrus"

     "github.com/spf13/cobra"
 )

-// NewCmdDelete returns a new cobra command
-func NewCmdDelete() *cobra.Command {
+// NewCmdCluster returns a new cobra command
+func NewCmdCluster() *cobra.Command {

     // create new cobra command
     cmd := &cobra.Command{
-        Use:   "delete",
-        Short: "Delete a resource [cluster, node].",
-        Long:  `Delete a resource [cluster, node].`,
+        Use:   "cluster",
+        Short: "Manage cluster(s)",
+        Long:  `Manage cluster(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
                 log.Errorln("Couldn't get help text")
@@ -44,8 +43,13 @@ func NewCmdDelete() *cobra.Command {
     }

     // add subcommands
-    cmd.AddCommand(NewCmdDeleteCluster())
-    cmd.AddCommand(NewCmdDeleteNode())
+    cmd.AddCommand(NewCmdClusterCreate())
+    cmd.AddCommand(NewCmdClusterStart())
+    cmd.AddCommand(NewCmdClusterStop())
+    cmd.AddCommand(NewCmdClusterDelete())
+    cmd.AddCommand(NewCmdClusterList())

     // add flags

     // done
     return cmd
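Because the noun command's `Run` handler only calls `cmd.Help()`, the bare noun is self-documenting and all actions hang off it as verbs; a quick sanity check (illustrative, not part of the diff):

```bash
# the bare noun prints its help text, listing the verbs create/start/stop/delete/list
k3d cluster

# each verb is addressed through its noun
k3d cluster create --help
```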
@@ -1,4 +1,4 @@
-/*Package create ...
+/*

 Copyright © 2020 The k3d Author(s)

@@ -20,7 +20,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package create
+package cluster

 import (
     "fmt"
@@ -40,7 +40,7 @@ import (
     log "github.com/sirupsen/logrus"
 )

-const createClusterDescription = `
+const clusterCreateDescription = `
 Create a new k3s cluster with containerized nodes (k3s in docker).
 Every cluster will consist of one or more containers:
     - 1 (or more) master node container (k3s)
@@ -48,24 +48,24 @@ Every cluster will consist of one or more containers:
     - (optionally) 1 (or more) worker node containers (k3s)
 `

-// NewCmdCreateCluster returns a new cobra command
-func NewCmdCreateCluster() *cobra.Command {
+// NewCmdClusterCreate returns a new cobra command
+func NewCmdClusterCreate() *cobra.Command {

-    createClusterOpts := &k3d.CreateClusterOpts{}
+    createClusterOpts := &k3d.ClusterCreateOpts{}
     var updateDefaultKubeconfig, updateCurrentContext bool

     // create new command
     cmd := &cobra.Command{
-        Use:   "cluster NAME",
-        Short: "Create a new k3s cluster in docker",
-        Long:  createClusterDescription,
+        Use:   "create NAME",
+        Short: "Create a new cluster",
+        Long:  clusterCreateDescription,
         Args:  cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
         Run: func(cmd *cobra.Command, args []string) {
             // parse args and flags
             cluster := parseCreateClusterCmd(cmd, args, createClusterOpts)

             // check if a cluster with that name exists already
-            if _, err := k3dCluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, cluster); err == nil {
+            if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, cluster); err == nil {
                 log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", cluster.Name)
             }
@@ -74,11 +74,11 @@ func NewCmdCreateCluster() *cobra.Command {
                 log.Debugln("'--update-default-kubeconfig set: enabling wait-for-master")
                 cluster.CreateClusterOpts.WaitForMaster = true
             }
-            if err := k3dCluster.CreateCluster(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
+            if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
                 // rollback if creation failed
                 log.Errorln(err)
                 log.Errorln("Failed to create cluster >>> Rolling Back")
-                if err := k3dCluster.DeleteCluster(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
+                if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
                     log.Errorln(err)
                     log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
                 }
@@ -88,7 +88,7 @@ func NewCmdCreateCluster() *cobra.Command {

             if updateDefaultKubeconfig || updateCurrentContext {
                 log.Debugf("Updating default kubeconfig with a new context for cluster %s", cluster.Name)
-                if _, err := k3dCluster.GetAndWriteKubeConfig(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
+                if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
                     log.Warningln(err)
                 }
             }
@@ -99,9 +99,9 @@ func NewCmdCreateCluster() *cobra.Command {
             fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name))
             } else if !updateCurrentContext {
                 if runtime.GOOS == "windows" {
-                    fmt.Printf("$env:KUBECONFIG=(%s get kubeconfig %s)\n", os.Args[0], cluster.Name)
+                    fmt.Printf("$env:KUBECONFIG=(%s kubeconfig get %s)\n", os.Args[0], cluster.Name)
                 } else {
-                    fmt.Printf("export KUBECONFIG=$(%s get kubeconfig %s)\n", os.Args[0], cluster.Name)
+                    fmt.Printf("export KUBECONFIG=$(%s kubeconfig get %s)\n", os.Args[0], cluster.Name)
                 }
             }
             fmt.Println("kubectl cluster-info")
@@ -153,7 +153,7 @@ func NewCmdCreateCluster() *cobra.Command {
 }

 // parseCreateClusterCmd parses the command input into variables required to create a cluster
-func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.CreateClusterOpts) *k3d.Cluster {
+func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.ClusterCreateOpts) *k3d.Cluster {

     /********************************
      * Parse and validate arguments *
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package delete
+package cluster

 import (
     "fmt"
@@ -36,12 +36,13 @@ import (
     "github.com/spf13/cobra"
 )

-// NewCmdDeleteCluster returns a new cobra command
-func NewCmdDeleteCluster() *cobra.Command {
+// NewCmdClusterDelete returns a new cobra command
+func NewCmdClusterDelete() *cobra.Command {

     // create new cobra command
     cmd := &cobra.Command{
-        Use:     "cluster [NAME [NAME ...] | --all]",
+        Use:     "delete [NAME [NAME ...] | --all]",
+        Aliases: []string{"del", "rm"},
         Short:   "Delete cluster(s).",
         Long:    `Delete cluster(s).`,
         Args:    cobra.MinimumNArgs(0), // 0 or n arguments; 0 = default cluster name
@@ -53,11 +54,11 @@ func NewCmdDeleteCluster() *cobra.Command {
                 log.Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
-                    if err := cluster.DeleteCluster(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
+                    if err := cluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
                         log.Fatalln(err)
                     }
                     log.Infoln("Removing cluster details from default kubeconfig...")
-                    if err := cluster.RemoveClusterFromDefaultKubeConfig(cmd.Context(), c); err != nil {
+                    if err := cluster.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
                         log.Warnln("Failed to remove cluster details from default kubeconfig")
                         log.Warnln(err)
                     }
@@ -99,7 +100,7 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+        clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -112,7 +113,7 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     }

     for _, name := range clusternames {
-        cluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
+        cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
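The delete verb takes zero or more names (zero falls back to the default cluster) or the `--all` flag, and also scrubs the cluster's entry from the default kubeconfig; for example (illustrative name):

```bash
# delete one named cluster; its context is removed from the default kubeconfig too
k3d cluster delete mycluster

# or wipe every cluster k3d knows about
k3d cluster delete --all
```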
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package get
+package cluster

 import (
     "context"
@@ -44,17 +44,17 @@ type clusterFlags struct {
     token bool
 }

-// NewCmdGetCluster returns a new cobra command
-func NewCmdGetCluster() *cobra.Command {
+// NewCmdClusterList returns a new cobra command
+func NewCmdClusterList() *cobra.Command {

     clusterFlags := clusterFlags{}

     // create new command
     cmd := &cobra.Command{
-        Use:     "cluster [NAME [NAME...]]",
-        Aliases: []string{"clusters"},
-        Short:   "Get cluster(s)",
-        Long:    `Get cluster(s).`,
+        Use:     "list [NAME [NAME...]]",
+        Aliases: []string{"ls", "get"},
+        Short:   "List cluster(s)",
+        Long:    `List cluster(s).`,
         Run: func(cmd *cobra.Command, args []string) {
             clusters := buildClusterList(cmd.Context(), args)
             PrintClusters(clusters, clusterFlags)
@@ -78,14 +78,14 @@ func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {

     if len(args) == 0 {
         // cluster name not specified : get all clusters
-        clusters, err = k3cluster.GetClusters(ctx, runtimes.SelectedRuntime)
+        clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
     } else {
         for _, clusterName := range args {
             // cluster name specified : get specific cluster
-            retrievedCluster, err := k3cluster.GetCluster(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
+            retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
             if err != nil {
                 log.Fatalln(err)
             }
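With the `ls` and `get` aliases registered above, all of the following invocations are equivalent, and the two flags come from the `clusterFlags` struct (illustrative):

```bash
k3d cluster list
k3d cluster ls
k3d cluster get   # the old verb kept as an alias

# optional output tweaks
k3d cluster list --no-headers --token
```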
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package start
+package cluster

 import (
     "time"
@@ -35,14 +35,14 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// NewCmdStartCluster returns a new cobra command
-func NewCmdStartCluster() *cobra.Command {
+// NewCmdClusterStart returns a new cobra command
+func NewCmdClusterStart() *cobra.Command {

-    startClusterOpts := types.StartClusterOpts{}
+    startClusterOpts := types.ClusterStartOpts{}

     // create new command
     cmd := &cobra.Command{
-        Use:               "cluster [NAME [NAME...] | --all]",
+        Use:               "start [NAME [NAME...] | --all]",
         Long:              `Start existing k3d cluster(s)`,
         Short:             "Start existing k3d cluster(s)",
         ValidArgsFunction: util.ValidArgsAvailableClusters,
@@ -52,7 +52,7 @@ func NewCmdStartCluster() *cobra.Command {
                 log.Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
-                    if err := cluster.StartCluster(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
+                    if err := cluster.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
                         log.Fatalln(err)
                     }
                 }
@@ -79,7 +79,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+        clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -92,7 +92,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     }

     for _, name := range clusternames {
-        cluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
+        cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package stop
+package cluster

 import (
     "github.com/spf13/cobra"
@@ -32,12 +32,12 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// NewCmdStopCluster returns a new cobra command
-func NewCmdStopCluster() *cobra.Command {
+// NewCmdClusterStop returns a new cobra command
+func NewCmdClusterStop() *cobra.Command {

     // create new command
     cmd := &cobra.Command{
-        Use:               "cluster [NAME [NAME...] | --all]",
+        Use:               "stop [NAME [NAME...] | --all]",
         Short:             "Stop existing k3d cluster(s)",
         Long:              `Stop existing k3d cluster(s).`,
         ValidArgsFunction: util.ValidArgsAvailableClusters,
@@ -47,7 +47,7 @@ func NewCmdStopCluster() *cobra.Command {
                 log.Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
-                    if err := cluster.StopCluster(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
+                    if err := cluster.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
                         log.Fatalln(err)
                     }
                 }
@@ -72,7 +72,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+        clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -85,7 +85,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     }

     for _, name := range clusternames {
-        cluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
+        cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
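Start and stop mirror each other; a typical restart cycle might look like this (illustrative; the `--wait` and `--timeout` flags are listed in the usage docs, and the `60s` duration format is an assumption on my part):

```bash
# stop every running cluster
k3d cluster stop --all

# bring them back up, waiting for the masters to be ready
k3d cluster start --all --wait --timeout 60s
```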
@@ -19,21 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package load
+package image

 import (
     log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )

-// NewCmdLoad returns a new cobra command
-func NewCmdLoad() *cobra.Command {
+// NewCmdImage returns a new cobra command
+func NewCmdImage() *cobra.Command {

     // create new cobra command
     cmd := &cobra.Command{
-        Use:   "load",
-        Short: "Load a resource [image] into a cluster.",
-        Long:  `Load a resource [image] into a cluster.`,
+        Use:   "image",
+        Short: "Handle container images.",
+        Long:  `Handle container images.`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
                 log.Errorln("Couldn't get help text")
@@ -43,7 +43,7 @@ func NewCmdLoad() *cobra.Command {
     }

     // add subcommands
-    cmd.AddCommand(NewCmdLoadImage())
+    cmd.AddCommand(NewCmdImageImport())

     // add flags

@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package load
+package image

 import (
     "github.com/spf13/cobra"
@@ -32,16 +32,16 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// NewCmdLoadImage returns a new cobra command
-func NewCmdLoadImage() *cobra.Command {
+// NewCmdImageImport returns a new cobra command
+func NewCmdImageImport() *cobra.Command {

-    loadImageOpts := k3d.LoadImageOpts{}
+    loadImageOpts := k3d.ImageImportOpts{}

     // create new command
     cmd := &cobra.Command{
-        Use:     "image [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]]",
-        Short:   "Load an image from docker into a k3d cluster.",
-        Long:    `Load an image from docker into a k3d cluster.`,
+        Use:     "import [IMAGE | ARCHIVE [IMAGE | ARCHIVE...]]",
+        Short:   "Import image(s) from docker into k3d cluster(s).",
+        Long:    `Import image(s) from docker into k3d cluster(s).`,
         Aliases: []string{"images"},
         Args:    cobra.MinimumNArgs(1),
         Run: func(cmd *cobra.Command, args []string) {
@@ -49,7 +49,7 @@ func NewCmdLoadImage() *cobra.Command {
             log.Debugf("Load images [%+v] from runtime [%s] into clusters [%+v]", images, runtimes.SelectedRuntime, clusters)
             for _, cluster := range clusters {
                 log.Infof("Loading images into '%s'", cluster.Name)
-                if err := tools.LoadImagesIntoCluster(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
+                if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
                     log.Errorf("Failed to load images into cluster '%s'", cluster.Name)
                     log.Errorln(err)
                 }
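After the rename, loading a locally built image into a cluster reads as follows (image and cluster names illustrative; `-c/--cluster` and `-k/--keep-tarball` are the flags listed in the usage docs):

```bash
# import an image from the local docker daemon into a k3d cluster
k3d image import my-app:dev --cluster mycluster

# keep the intermediate tarball in the shared volume for later re-use
k3d image import my-app:dev -c mycluster --keep-tarball
```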
@@ -19,21 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package create
+package kubeconfig

 import (
     log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )

-// NewCmdCreate returns a new cobra command
-func NewCmdCreate() *cobra.Command {
+// NewCmdKubeconfig returns a new cobra command
+func NewCmdKubeconfig() *cobra.Command {

     // create new cobra command
     cmd := &cobra.Command{
-        Use:   "create",
-        Short: "Create a resource [cluster, node].",
-        Long:  `Create a resource [cluster, node].`,
+        Use:   "kubeconfig",
+        Short: "Manage kubeconfig(s)",
+        Long:  `Manage kubeconfig(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
                 log.Errorln("Couldn't get help text")
@@ -43,8 +43,8 @@ func NewCmdCreate() *cobra.Command {
     }

     // add subcommands
-    cmd.AddCommand(NewCmdCreateCluster())
-    cmd.AddCommand(NewCmdCreateNode())
+    cmd.AddCommand(NewCmdKubeconfigGet())
+    cmd.AddCommand(NewCmdKubeconfigMerge())

     // add flags

cmd/kubeconfig/kubeconfigGet.go (new file, 107 lines)
@@ -0,0 +1,107 @@
+/*
+Copyright © 2020 The k3d Author(s)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+package kubeconfig
+
+import (
+    "fmt"
+    "os"
+
+    "github.com/rancher/k3d/v3/cmd/util"
+    "github.com/rancher/k3d/v3/pkg/cluster"
+    "github.com/rancher/k3d/v3/pkg/runtimes"
+    k3d "github.com/rancher/k3d/v3/pkg/types"
+    "github.com/spf13/cobra"
+
+    log "github.com/sirupsen/logrus"
+)
+
+type getKubeconfigFlags struct {
+    all bool
+}
+
+// NewCmdKubeconfigGet returns a new cobra command
+func NewCmdKubeconfigGet() *cobra.Command {
+
+    writeKubeConfigOptions := cluster.WriteKubeConfigOptions{
+        UpdateExisting:       true,
+        UpdateCurrentContext: true,
+        OverwriteExisting:    true,
+    }
+
+    getKubeconfigFlags := getKubeconfigFlags{}
+
+    // create new command
+    cmd := &cobra.Command{
+        Use:               "get [CLUSTER [CLUSTER [...]] | --all]",
+        Short:             "Get kubeconfig from cluster(s).",
+        Long:              `Get kubeconfig from cluster(s).`,
+        ValidArgsFunction: util.ValidArgsAvailableClusters,
+        Args: func(cmd *cobra.Command, args []string) error {
+            if (len(args) < 1 && !getKubeconfigFlags.all) || (len(args) > 0 && getKubeconfigFlags.all) {
+                return fmt.Errorf("Need to specify one or more cluster names *or* set `--all` flag")
+            }
+            return nil
+        },
+        Run: func(cmd *cobra.Command, args []string) {
+            var clusters []*k3d.Cluster
+            var err error
+
+            // generate list of clusters
+            if getKubeconfigFlags.all {
+                clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
+                if err != nil {
+                    log.Fatalln(err)
+                }
+            } else {
+                for _, clusterName := range args {
+                    retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
+                    if err != nil {
+                        log.Fatalln(err)
+                    }
+                    clusters = append(clusters, retrievedCluster)
+                }
+            }
+
+            // get kubeconfigs from all clusters
+            errorGettingKubeconfig := false
+            for _, c := range clusters {
+                log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
+                fmt.Println("---") // YAML document separator
+                if _, err := cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
+                    log.Errorln(err)
+                    errorGettingKubeconfig = true
+                }
+            }
+
+            // return with non-zero exit code, if there was an error for one of the clusters
+            if errorGettingKubeconfig {
+                os.Exit(1)
+            }
+        },
+    }
+
+    // add flags
+    cmd.Flags().BoolVarP(&getKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")
+
+    // done
+    return cmd
+}
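Since `KubeconfigGetWrite` is invoked with `"-"` as the output path and each document is preceded by a `---` separator, `kubeconfig get` streams plain YAML to stdout, which makes shell use straightforward (illustrative name):

```bash
# print one cluster's kubeconfig
k3d kubeconfig get mycluster

# capture the kubeconfigs of all clusters as one multi-document YAML file
k3d kubeconfig get --all > all-clusters.yaml
```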
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package get
+package kubeconfig

 import (
     "fmt"
@@ -38,37 +38,38 @@ import (
     log "github.com/sirupsen/logrus"
 )

-type getKubeconfigFlags struct {
+type mergeKubeconfigFlags struct {
     all           bool
     output        string
     targetDefault bool
 }

-// NewCmdGetKubeconfig returns a new cobra command
-func NewCmdGetKubeconfig() *cobra.Command {
+// NewCmdKubeconfigMerge returns a new cobra command
+func NewCmdKubeconfigMerge() *cobra.Command {

     writeKubeConfigOptions := cluster.WriteKubeConfigOptions{}

-    getKubeconfigFlags := getKubeconfigFlags{}
+    mergeKubeconfigFlags := mergeKubeconfigFlags{}

     // create new command
     cmd := &cobra.Command{
-        Use:   "kubeconfig [CLUSTER [CLUSTER [...]] | --all]", // TODO: getKubeconfig: allow more than one cluster name or even --all
-        Short: "Get kubeconfig",
-        Long:  `Get kubeconfig.`,
+        Use:     "merge [CLUSTER [CLUSTER [...]] | --all]",
+        Aliases: []string{"write"},
+        Long:    `Merge/Write kubeconfig(s) from cluster(s) into existing kubeconfig/file.`,
+        Short:   "Merge/Write kubeconfig(s) from cluster(s) into existing kubeconfig/file.",
         ValidArgsFunction: util.ValidArgsAvailableClusters,
         Args:              cobra.MinimumNArgs(0),
         Run: func(cmd *cobra.Command, args []string) {
             var clusters []*k3d.Cluster
             var err error

-            if getKubeconfigFlags.targetDefault && getKubeconfigFlags.output != "" {
+            if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
                 log.Fatalln("Cannot use both '--output' and '--merge-default-kubeconfig' at the same time")
             }

             // generate list of clusters
-            if getKubeconfigFlags.all {
-                clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+            if mergeKubeconfigFlags.all {
+                clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
                 if err != nil {
                     log.Fatalln(err)
                 }
@@ -80,7 +81,7 @@ func NewCmdGetKubeconfig() *cobra.Command {
             }

             for _, clusterName := range clusternames {
-                retrievedCluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
+                retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
                 if err != nil {
                     log.Fatalln(err)
                 }
@@ -98,11 +99,11 @@ func NewCmdGetKubeconfig() *cobra.Command {
             }
             for _, c := range clusters {
                 log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
-                output := getKubeconfigFlags.output
-                if output == "" && !getKubeconfigFlags.targetDefault {
+                output := mergeKubeconfigFlags.output
+                if output == "" && !mergeKubeconfigFlags.targetDefault {
                     output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
                 }
-                output, err = cluster.GetAndWriteKubeConfig(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
+                output, err = cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
                 if err != nil {
                     log.Errorln(err)
                     errorGettingKubeconfig = true
@@ -112,7 +113,7 @@ func NewCmdGetKubeconfig() *cobra.Command {
             }

             // only print kubeconfig file path if output is not stdout ("-")
-            if getKubeconfigFlags.output != "-" {
+            if mergeKubeconfigFlags.output != "-" {
                 fmt.Println(strings.Join(outputs, ":"))
             }

@@ -124,15 +125,15 @@ func NewCmdGetKubeconfig() *cobra.Command {
     }

     // add flags
-    cmd.Flags().StringVarP(&getKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
+    cmd.Flags().StringVarP(&mergeKubeconfigFlags.output, "output", "o", "", fmt.Sprintf("Define output [ - | FILE ] (default from $KUBECONFIG or %s", clientcmd.RecommendedHomeFile))
     if err := cmd.MarkFlagFilename("output"); err != nil {
         log.Fatalln("Failed to mark flag --output as filename")
     }
-    cmd.Flags().BoolVarP(&getKubeconfigFlags.targetDefault, "merge-default-kubeconfig", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
+    cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "merge-default-kubeconfig", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
     cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
     cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "switch-context", "s", true, "Switch to new context")
     cmd.Flags().BoolVar(&writeKubeConfigOptions.OverwriteExisting, "overwrite", false, "[Careful!] Overwrite existing file, ignoring its contents")
-    cmd.Flags().BoolVarP(&getKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")
+    cmd.Flags().BoolVarP(&mergeKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")

     // done
     return cmd
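The merge verb covers the old `get kubeconfig` use cases; note that the code above fatals if `--output` and `--merge-default-kubeconfig` are combined, so they are used separately (cluster name and file path illustrative):

```bash
# write/merge the kubeconfig into a dedicated file
k3d kubeconfig merge mycluster --output ~/.k3d/kubeconfig-mycluster.yaml

# merge into the default kubeconfig and switch the current-context
k3d kubeconfig merge mycluster --merge-default-kubeconfig --switch-context
```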
@@ -19,22 +19,21 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package get
+package node

 import (
     log "github.com/sirupsen/logrus"

     "github.com/spf13/cobra"
 )

-// NewCmdGet returns a new cobra command
-func NewCmdGet() *cobra.Command {
+// NewCmdNode returns a new cobra command
+func NewCmdNode() *cobra.Command {

     // create new cobra command
     cmd := &cobra.Command{
-        Use:   "get",
-        Short: "Get a resource [cluster, node, kubeconfig].",
-        Long:  `Get a resource [cluster, node, kubeconfig].`,
+        Use:   "node",
+        Short: "Manage node(s)",
+        Long:  `Manage node(s)`,
         Run: func(cmd *cobra.Command, args []string) {
             if err := cmd.Help(); err != nil {
                 log.Errorln("Couldn't get help text")
@@ -44,9 +43,13 @@ func NewCmdGet() *cobra.Command {
     }

     // add subcommands
-    cmd.AddCommand(NewCmdGetCluster())
-    cmd.AddCommand(NewCmdGetNode())
-    cmd.AddCommand(NewCmdGetKubeconfig())
+    cmd.AddCommand(NewCmdNodeCreate())
+    cmd.AddCommand(NewCmdNodeStart())
+    cmd.AddCommand(NewCmdNodeStop())
+    cmd.AddCommand(NewCmdNodeDelete())
+    cmd.AddCommand(NewCmdNodeList())

     // add flags

     // done
     return cmd
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package create
+package node

 import (
     "fmt"
@@ -35,20 +35,20 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// NewCmdCreateNode returns a new cobra command
-func NewCmdCreateNode() *cobra.Command {
+// NewCmdNodeCreate returns a new cobra command
+func NewCmdNodeCreate() *cobra.Command {

-    createNodeOpts := k3d.CreateNodeOpts{}
+    createNodeOpts := k3d.NodeCreateOpts{}

     // create new command
     cmd := &cobra.Command{
-        Use:   "node NAME",
+        Use:   "create NAME",
         Short: "Create a new k3s node in docker",
         Long:  `Create a new containerized k3s node (k3s in docker).`,
         Args:  cobra.ExactArgs(1), // exactly one name accepted // TODO: if not specified, inherit from cluster that the node shall belong to, if that is specified
         Run: func(cmd *cobra.Command, args []string) {
             nodes, cluster := parseCreateNodeCmd(cmd, args)
-            if err := k3dc.AddNodesToCluster(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
+            if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
                 log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
                 log.Errorln(err)
             }
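Node creation follows the same noun-verb pattern; the flags below are the ones listed in the usage docs (node and cluster names illustrative):

```bash
# add two worker nodes to an existing cluster
k3d node create extra-worker --cluster mycluster --role worker --replicas 2
```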
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package delete
+package node

 import (
     "github.com/rancher/k3d/v3/cmd/util"
@@ -30,14 +30,14 @@ import (
     "github.com/spf13/cobra"
 )

-// NewCmdDeleteNode returns a new cobra command
-func NewCmdDeleteNode() *cobra.Command {
+// NewCmdNodeDelete returns a new cobra command
+func NewCmdNodeDelete() *cobra.Command {

     // create new cobra command
     cmd := &cobra.Command{
-        Use:   "node (NAME | --all)",
-        Short: "Delete a node.",
-        Long:  `Delete a node.`,
+        Use:   "delete (NAME | --all)",
+        Short: "Delete node(s).",
+        Long:  `Delete node(s).`,
         Args:              cobra.MinimumNArgs(1), // at least one node has to be specified
         ValidArgsFunction: util.ValidArgsAvailableNodes,
         Run: func(cmd *cobra.Command, args []string) {
@@ -48,7 +48,7 @@ func NewCmdDeleteNode() *cobra.Command {
                 log.Infoln("No nodes found")
             } else {
                 for _, node := range nodes {
-                    if err := cluster.DeleteNode(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
+                    if err := cluster.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
                         log.Fatalln(err)
                     }
                 }
@@ -74,7 +74,7 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        nodes, err = cluster.GetNodes(cmd.Context(), runtimes.SelectedRuntime)
+        nodes, err = cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -86,7 +86,7 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
     }

     for _, name := range args {
-        node, err := cluster.GetNode(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
+        node, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package get
+package node

 import (
     "fmt"
@@ -37,29 +37,29 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// NewCmdGetNode returns a new cobra command
-func NewCmdGetNode() *cobra.Command {
+// NewCmdNodeList returns a new cobra command
+func NewCmdNodeList() *cobra.Command {

     // create new command
     cmd := &cobra.Command{
-        Use:     "node [NAME [NAME...]]",
-        Aliases: []string{"nodes"},
-        Short:   "Get node(s)",
-        Long:    `Get node(s).`,
+        Use:     "list [NAME [NAME...]]",
+        Aliases: []string{"ls", "get"},
+        Short:   "List node(s)",
+        Long:    `List node(s).`,
         Args:              cobra.MinimumNArgs(0), // 0 or more; 0 = all
         ValidArgsFunction: util.ValidArgsAvailableNodes,
         Run: func(cmd *cobra.Command, args []string) {
             nodes, headersOff := parseGetNodeCmd(cmd, args)
             var existingNodes []*k3d.Node
             if len(nodes) == 0 { // Option a) no name specified -> get all nodes
-                found, err := cluster.GetNodes(cmd.Context(), runtimes.SelectedRuntime)
+                found, err := cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
                 if err != nil {
                     log.Fatalln(err)
                 }
                 existingNodes = append(existingNodes, found...)
             } else { // Option b) cluster name specified -> get specific cluster
                 for _, node := range nodes {
-                    found, err := cluster.GetNode(cmd.Context(), runtimes.SelectedRuntime, node)
+                    found, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
                     if err != nil {
                         log.Fatalln(err)
                     }
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package start
+package node

 import (
     "github.com/rancher/k3d/v3/cmd/util"
@@ -30,12 +30,12 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// NewCmdStartNode returns a new cobra command
-func NewCmdStartNode() *cobra.Command {
+// NewCmdNodeStart returns a new cobra command
+func NewCmdNodeStart() *cobra.Command {

     // create new command
     cmd := &cobra.Command{
-        Use:               "node NAME", // TODO: startNode: allow one or more names or --all
+        Use:               "start NAME", // TODO: startNode: allow one or more names or --all
         Short:             "Start an existing k3d node",
         Long:              `Start an existing k3d node.`,
         ValidArgsFunction: util.ValidArgsAvailableNodes,
@@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */
-package stop
+package node

 import (
     "github.com/rancher/k3d/v3/cmd/util"
@@ -31,12 +31,12 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// NewCmdStopNode returns a new cobra command
-func NewCmdStopNode() *cobra.Command {
+// NewCmdNodeStop returns a new cobra command
+func NewCmdNodeStop() *cobra.Command {

     // create new command
     cmd := &cobra.Command{
-        Use:               "node NAME", // TODO: stopNode: allow one or more names or --all",
+        Use:               "stop NAME", // TODO: stopNode: allow one or more names or --all",
         Short:             "Stop an existing k3d node",
         Long:              `Stop an existing k3d node.`,
         ValidArgsFunction: util.ValidArgsAvailableNodes,
cmd/root.go (20 changes)
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancher/k3d/v3/cmd/create"
|
||||
"github.com/rancher/k3d/v3/cmd/delete"
|
||||
"github.com/rancher/k3d/v3/cmd/get"
|
||||
"github.com/rancher/k3d/v3/cmd/load"
|
||||
"github.com/rancher/k3d/v3/cmd/start"
|
||||
"github.com/rancher/k3d/v3/cmd/stop"
|
||||
"github.com/rancher/k3d/v3/cmd/cluster"
|
||||
"github.com/rancher/k3d/v3/cmd/image"
|
||||
"github.com/rancher/k3d/v3/cmd/kubeconfig"
|
||||
"github.com/rancher/k3d/v3/cmd/node"
|
||||
"github.com/rancher/k3d/v3/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v3/version"
|
||||
|
||||
@ -92,12 +90,10 @@ func init() {
|
||||
|
||||
// add subcommands
|
||||
rootCmd.AddCommand(NewCmdCompletion())
|
||||
rootCmd.AddCommand(create.NewCmdCreate())
|
||||
rootCmd.AddCommand(delete.NewCmdDelete())
|
||||
rootCmd.AddCommand(get.NewCmdGet())
|
||||
rootCmd.AddCommand(stop.NewCmdStop())
|
||||
rootCmd.AddCommand(start.NewCmdStart())
|
||||
rootCmd.AddCommand(load.NewCmdLoad())
|
||||
rootCmd.AddCommand(cluster.NewCmdCluster())
|
||||
rootCmd.AddCommand(kubeconfig.NewCmdKubeconfig())
|
||||
rootCmd.AddCommand(node.NewCmdNode())
|
||||
rootCmd.AddCommand(image.NewCmdImage())
|
||||
|
||||
rootCmd.AddCommand(&cobra.Command{
|
||||
Use: "version",
|
||||
|
@@ -1,52 +0,0 @@
-/*
-Copyright © 2020 The k3d Author(s)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-*/
-package start
-
-import (
-    log "github.com/sirupsen/logrus"
-
-    "github.com/spf13/cobra"
-)
-
-// NewCmdStart returns a new cobra command
-func NewCmdStart() *cobra.Command {
-
-    // create new cobra command
-    cmd := &cobra.Command{
-        Use:   "start",
-        Short: "Start a resource [cluster, node].",
-        Long:  `Start a resource [cluster, node].`,
-        Run: func(cmd *cobra.Command, args []string) {
-            if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
-            }
-        },
-    }
-
-    // add subcommands
-    cmd.AddCommand(NewCmdStartCluster())
-    cmd.AddCommand(NewCmdStartNode())
-
-    // done
-    return cmd
-}
@@ -1,52 +0,0 @@
-/*
-Copyright © 2020 The k3d Author(s)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-*/
-package stop
-
-import (
-    log "github.com/sirupsen/logrus"
-
-    "github.com/spf13/cobra"
-)
-
-// NewCmdStop returns a new cobra command
-func NewCmdStop() *cobra.Command {
-
-    // create new cobra command
-    cmd := &cobra.Command{
-        Use:   "stop",
-        Short: "Stop a resource [cluster, node].",
-        Long:  `Stop a resource [cluster, node].`,
-        Run: func(cmd *cobra.Command, args []string) {
-            if err := cmd.Help(); err != nil {
-                log.Errorln("Couldn't get help text")
-                log.Fatalln(err)
-            }
-        },
-    }
-
-    // add subcommands
-    cmd.AddCommand(NewCmdStopCluster())
-    cmd.AddCommand(NewCmdStopNode())
-
-    // done
-    return cmd
-}
@@ -37,7 +37,7 @@ func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete st

     var completions []string
     var clusters []*k3d.Cluster
-    clusters, err := k3dcluster.GetClusters(context.Background(), runtimes.SelectedRuntime)
+    clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
     if err != nil {
         log.Errorln("Failed to get list of clusters for shell completion")
         return nil, cobra.ShellCompDirectiveError
@@ -62,7 +62,7 @@ func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete strin

     var completions []string
     var nodes []*k3d.Node
-    nodes, err := k3dcluster.GetNodes(context.Background(), runtimes.SelectedRuntime)
+    nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
     if err != nil {
         log.Errorln("Failed to get list of nodes for shell completion")
         return nil, cobra.ShellCompDirectiveError
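These helpers feed cobra's shell completion, so existing cluster and node names tab-complete once a completion script is loaded; a typical setup (illustrative, assuming a bash shell with process substitution):

```bash
# load bash completions for the current shell session
source <(k3d completion bash)

# now e.g. `k3d cluster delete <TAB>` offers existing cluster names
```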
@@ -3,11 +3,11 @@

 ## Issues with BTRFS

 - As [@jaredallard](https://github.com/jaredallard) [pointed out](https://github.com/rancher/k3d/pull/48), people running `k3d` on a system with **btrfs**, may need to mount `/dev/mapper` into the nodes for the setup to work.
-  - This will do: `k3d create cluster CLUSTER_NAME -v /dev/mapper:/dev/mapper`
+  - This will do: `k3d cluster create CLUSTER_NAME -v /dev/mapper:/dev/mapper`

 ## Issues with ZFS

-- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-master setups (e.g. `k3d create cluster multimaster --masters 3`) fails, because the initializing master node (server flag `--cluster-init`) errors out with the following log:
+- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-master setups (e.g. `k3d cluster create multimaster --masters 3`) fails, because the initializing master node (server flag `--cluster-init`) errors out with the following log:

   ```bash
   starting kubernetes: preparing server: start cluster and https: raft_init(): io: create I/O capabilities probe file: posix_allocate: operation not supported on socket
   ```
@@ -23,7 +23,7 @@
 - Possible [fix/workaround by @zer0def](https://github.com/rancher/k3d/issues/133#issuecomment-549065666):
   - use a docker storage driver which cleans up properly (e.g. overlay2)
   - clean up or expand docker root filesystem
-  - change the kubelet's eviction thresholds upon cluster creation: `k3d create cluster --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`
+  - change the kubelet's eviction thresholds upon cluster creation: `k3d cluster create --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`

 ## Restarting a multi-master cluster or the initializing master node fails

@@ -10,7 +10,7 @@
 - --command -> planned: keep
 - --shell -> planned: keep (or second arg)
   - auto, bash, zsh
-- create -> `k3d create cluster CLUSTERNAME`
+- create -> `k3d cluster create CLUSTERNAME`
   - --name -> dropped, implemented via arg
   - --volume -> implemented
   - --port -> implemented
@@ -30,7 +30,7 @@
   - --registry-volume -> TBD
   - --registries-file -> TBD
   - --enable-registry-cache -> TBD
-- (add-node) -> `k3d create node NODENAME`
+- (add-node) -> `k3d node create NODENAME`
   - --role -> implemented
   - --name -> dropped, implemented as arg
   - --count -> implemented as `--replicas`
@@ -41,23 +41,23 @@
   - --k3s -> TBD
   - --k3s-secret -> TBD
   - --k3s-token -> TBD
-- delete -> `k3d delete cluster CLUSTERNAME`
+- delete -> `k3d cluster delete CLUSTERNAME`
   - --name -> dropped, implemented as arg
   - --all -> implemented
   - --prune -> TBD
   - --keep-registry-volume -> TBD
-- stop -> `k3d stop cluster CLUSTERNAME`
+- stop -> `k3d cluster stop CLUSTERNAME`
   - --name -> dropped, implemented as arg
   - --all -> implemented
-- start -> `k3d start cluster CLUSTERNAME`
+- start -> `k3d cluster start CLUSTERNAME`
   - --name -> dropped, implemented as arg
   - --all -> implemented
 - list -> dropped, implemented as `k3d get clusters`
-- get-kubeconfig -> `k3d get kubeconfig CLUSTERNAME`
+- get-kubeconfig -> `k3d kubeconfig get|merge CLUSTERNAME`
   - --name -> dropped, implemented as arg
   - --all -> implemented
   - --overwrite -> implemented
-- import-images -> `k3d load image [--cluster CLUSTERNAME] [--keep] IMAGES`
+- import-images -> `k3d image import [--cluster CLUSTERNAME] [--keep] IMAGES`
   - --name -> implemented as `--cluster`
   - --no-remove -> implemented as `--keep-tarball`
 ```

@@ -50,13 +50,13 @@ You have several options there:
 Create a cluster named `mycluster` with just a single master node:

 ```bash
-k3d create cluster mycluster
+k3d cluster create mycluster
 ```

 Get the new cluster's connection details merged into your default kubeconfig (usually specified using the `KUBECONFIG` environment variable or the default path `#!bash $HOME/.kube/config`) and directly switch to the new context:

 ```bash
-k3d get kubeconfig mycluster --switch
+k3d kubeconfig merge mycluster --switch-context
 ```

 Use the new cluster with [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), e.g.:
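The quoted page continues with a kubectl example that the hunk cuts off; a typical first smoke test at that point (illustrative, not part of the diff) would be:

```bash
kubectl get nodes
kubectl get pods --all-namespaces
```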
@@ -13,7 +13,7 @@ Existing networks won't be managed by k3d together with the cluster lifecycle.

 ### `host` network

-When using the `--network` flag to connect to the host network (i.e. `k3d create cluster --network host`),
+When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`),
 you won't be able to create more than **one master node**.
 An edge case would be one master node (with agent disabled) and one worker node.

@ -2,10 +2,14 @@

```bash
k3d
--runtime # choose the container runtime (default: docker)
--verbose # enable verbose (debug) logging (default: false)
create
cluster [CLUSTERNAME] # default cluster name is 'k3s-default'
--version # show k3d and k3s version
-h, --help # show help text
version # show k3d and k3s version
help [COMMAND] # show help text for any command
completion [bash | zsh | (psh | powershell)] # generate completion scripts for common shells
cluster [CLUSTERNAME] # default cluster name is 'k3s-default'
create
-a, --api-port # specify the port on which the cluster will be accessible (e.g. via kubectl)
-i, --image # specify which k3s image should be used for the nodes
--k3s-agent-arg # add additional arguments to the k3s agent (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
@ -21,46 +25,43 @@ k3d
-v, --volume # specify additional bind-mounts
--wait # enable waiting for all master nodes to be ready before returning
-w, --workers # specify how many worker nodes you want to create
node NODENAME # Create new nodes (and add them to existing clusters)
start CLUSTERNAME # start a (stopped) cluster
-a, --all # start all clusters
--wait # wait for all masters and master-loadbalancer to be up before returning
--timeout # maximum waiting time for '--wait' before canceling/returning
stop CLUSTERNAME # stop a cluster
-a, --all # stop all clusters
delete CLUSTERNAME # delete an existing cluster
-a, --all # delete all existing clusters
list [CLUSTERNAME [CLUSTERNAME ...]]
--no-headers # do not print headers
--token # show column with cluster tokens
node
create NODENAME # Create new nodes (and add them to existing clusters)
-c, --cluster # specify the cluster that the node shall connect to
-i, --image # specify which k3s image should be used for the node(s)
--replicas # specify how many replicas you want to create with this spec
--role # specify the node role
--wait # wait for the node to be up and running before returning
--timeout # specify a timeout duration, after which the node creation will be interrupted, if not done yet
delete
cluster CLUSTERNAME # delete an existing cluster
-a, --all # delete all existing clusters
node NODENAME # delete an existing node
start NODENAME # start a (stopped) node
stop NODENAME # stop a node
delete NODENAME # delete an existing node
-a, --all # delete all existing nodes
start
cluster CLUSTERNAME # start a (stopped) cluster
-a, --all # start all clusters
--wait # wait for all masters and master-loadbalancer to be up before returning
--timeout # maximum waiting time for '--wait' before canceling/returning
node NODENAME # start a (stopped) node
stop
cluster CLUSTERNAME # stop a cluster
-a, --all # stop all clusters
node # stop a node
get
cluster [CLUSTERNAME [CLUSTERNAME ...]]
list NODENAME
--no-headers # do not print headers
--token # show column with cluster tokens
node NODENAME
--no-headers # do not print headers
kubeconfig (CLUSTERNAME [CLUSTERNAME ...] | --all)
kubeconfig
get (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and save it into a file in $HOME/.k3d
-a, --all # get kubeconfigs from all clusters
merge (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and merge it/them into an existing kubeconfig
-a, --all # get kubeconfigs from all clusters
--output # specify the output file where the kubeconfig should be written to
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents
-s, --switch-context # switch current-context in kubeconfig to the new context
-u, --update # update conflicting fields in existing kubeconfig (default: true)
-d, --merge-default-kubeconfig # update the default kubeconfig (usually $KUBECONFIG or $HOME/.kube/config)
load
image [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
image
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
-c, --cluster # clusters to load the image into
-k, --keep-tarball # do not delete the image tarball from the shared volume after completion
completion SHELL # Generate completion scripts
version # show k3d build version
help [COMMAND] # show help text for any command
```
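
For orientation, here is a short end-to-end session in the new noun-verb syntax, assembled from the tree above as a sketch (the names `demo` and `extra` are placeholders):

```bash
k3d cluster create demo --workers 2                # create cluster 'demo' with two worker nodes
k3d kubeconfig merge demo --switch-context         # merge its kubeconfig and switch the current-context
k3d node create extra --cluster demo --role worker # add one more worker node to the running cluster
k3d cluster delete demo                            # tear the cluster down again
```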

@ -7,7 +7,7 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh

1. Create a cluster, mapping the ingress port 80 to localhost:8081

`#!bash k3d create cluster --api-port 6550 -p 8081:80@loadbalancer --workers 2`
`#!bash k3d cluster create --api-port 6550 -p 8081:80@loadbalancer --workers 2`

!!! info "Good to know"
- `--api-port 6550` is not required for the example to work. It's used to have `k3s`'s API-Server listening on port 6550 with that port mapped to the host system.

@ -18,7 +18,7 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh

2. Get the kubeconfig file

`#!bash export KUBECONFIG="$(k3d get kubeconfig k3s-default)"`
`#!bash export KUBECONFIG="$(k3d kubeconfig get k3s-default)"`

3. Create an nginx deployment
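
The concrete commands for this step sit outside the diff context shown here; as a sketch using a standard `kubectl` generator:

```bash
kubectl create deployment nginx --image=nginx
```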

@ -56,7 +56,7 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh

1. Create a cluster, mapping the port 30080 from worker-0 to localhost:8082

`#!bash k3d create cluster mycluster -p 8082:30080@worker[0] --workers 2`
`#!bash k3d cluster create mycluster -p 8082:30080@worker[0] --workers 2`

- Note: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport)
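
A workload can then be served on that NodePort, e.g. as a sketch with standard `kubectl` generators (the deployment/service name `nginx` is a placeholder):

```bash
kubectl create deployment nginx --image=nginx
kubectl create service nodeport nginx --tcp=80:80 --node-port=30080
```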

@ -3,7 +3,7 @@

## Registries configuration file

You can add registries by specifying them in a `registries.yaml` and mounting them at creation time:
`#!bash k3d create cluster mycluster --volume /home/YOU/my-registries.yaml:/etc/rancher/k3s/registries.yaml`.
`#!bash k3d cluster create mycluster --volume /home/YOU/my-registries.yaml:/etc/rancher/k3s/registries.yaml`.

This file is a regular [k3s registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/), and looks like this:
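
The file contents lie outside the diff context shown here; as an illustrative sketch following the upstream k3s schema (registry name and endpoint are placeholders):

```bash
cat > my-registries.yaml <<EOF
mirrors:
  "my.company.registry:5000":
    endpoint:
      - http://my.company.registry:5000
EOF
```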

@ -61,7 +61,7 @@ configs:

Finally, we can create the cluster, mounting the CA file in the path we specified in `ca_file`:

`#!bash k3d create cluster --volume ${HOME}/.k3d/my-registries.yaml:/etc/rancher/k3s/registries.yaml --volume ${HOME}/.k3d/my-company-root.pem:/etc/ssl/certs/my-company-root.pem`
`#!bash k3d cluster create --volume ${HOME}/.k3d/my-registries.yaml:/etc/rancher/k3s/registries.yaml --volume ${HOME}/.k3d/my-company-root.pem:/etc/ssl/certs/my-company-root.pem`

## Using a local registry

@ -182,6 +182,6 @@ sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"

and then mount it at `/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl` (where `containerd` in your k3d nodes will load it) when creating the k3d cluster:

```bash
k3d create cluster mycluster \
k3d cluster create mycluster \
--volume ${HOME}/.k3d/config.toml.tmpl:/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
```

@ -12,33 +12,33 @@ To get a kubeconfig set up for you to connect to a k3d cluster, you can go diffe

## Getting the kubeconfig for a newly created cluster

1. Create a new kubeconfig file **after** cluster creation
- `#!bash k3d get kubeconfig mycluster`
- `#!bash k3d kubeconfig get mycluster`
- *Note:* this will create (or update) the file `$HOME/.k3d/kubeconfig-mycluster.yaml`
- *Tip:* Use it: `#!bash export KUBECONFIG=$(k3d get kubeconfig mycluster)`
- *Tip:* Use it: `#!bash export KUBECONFIG=$(k3d kubeconfig get mycluster)`
2. Update your default kubeconfig **upon** cluster creation
- `#!bash k3d create cluster mycluster --update-kubeconfig`
- `#!bash k3d cluster create mycluster --update-kubeconfig`
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
3. Update your default kubeconfig **after** cluster creation
- `#!bash k3d get kubeconfig mycluster --merge-default-kubeconfig`
- `#!bash k3d kubeconfig merge mycluster --merge-default-kubeconfig`
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
4. Update a different kubeconfig **after** cluster creation
- `#!bash k3d get kubeconfig mycluster --output some/other/file.yaml`
- `#!bash k3d kubeconfig merge mycluster --output some/other/file.yaml`
- *Note:* this won't switch the current-context
- The file will be created if it doesn't exist

!!! info "Switching the current context"
None of the above options switch the current-context by default.
This is intended to be least intrusive, since the current-context has a global effect.
You can switch the current-context directly with the `get kubeconfig` command by adding the `--switch-context` flag.
You can switch the current-context directly with the `kubeconfig merge` command by adding the `--switch-context` flag.

## Removing cluster details from the kubeconfig

`#!bash k3d delete cluster mycluster` will always remove the details for `mycluster` from the default kubeconfig.
`#!bash k3d cluster delete mycluster` will always remove the details for `mycluster` from the default kubeconfig.
It will also delete the respective kubeconfig file in `$HOME/.k3d/` if it exists.

## Handling multiple clusters

`k3d get kubeconfig` let's you specify one or more clusters via arguments _or_ all via `--all`.
`k3d kubeconfig merge` lets you specify one or more clusters via arguments _or_ all via `--all`.
All kubeconfigs will then be merged into a single file if `--merge-default-kubeconfig` or `--output` is specified.
If neither of those two flags was specified, a new file will be created per cluster and the merged path (e.g. `$HOME/.k3d/kubeconfig-cluster1.yaml:$HOME/.k3d/cluster2.yaml`) will be returned.
Note that with multiple clusters specified, the `--switch-context` flag will change the current context to the cluster which was last in the list.
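
For the multi-cluster case, a quick sketch (cluster names are placeholders):

```bash
# merge two clusters' kubeconfigs into the default kubeconfig and
# switch the current-context to the last cluster in the list
k3d kubeconfig merge cluster1 cluster2 --merge-default-kubeconfig --switch-context
```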
@ -9,7 +9,7 @@ Create a cluster with 3 master nodes using k3s' embedded dqlite database.

The first master to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other master nodes.

```bash
k3d create cluster multimaster --masters 3
k3d cluster create multimaster --masters 3
```
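
To verify that all three masters came up, a quick check (a sketch; `k3d cluster list` and plain `kubectl` are enough here):

```bash
k3d cluster list   # the 'multimaster' cluster should show up
kubectl get nodes  # three master nodes should be listed
```
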
## Adding master nodes to a running cluster

@ -17,7 +17,7 @@ The first master to be created will use the `--cluster-init` flag and k3d will w

In theory (and also in practice in most cases), this is as easy as executing the following command:

```bash
k3d create node newmaster --cluster multimaster --role master
k3d node create newmaster --cluster multimaster --role master
```

!!! important "There's a trap!"

@ -40,10 +40,10 @@ import (
"golang.org/x/sync/errgroup"
)

// CreateCluster creates a new cluster consisting of
// ClusterCreate creates a new cluster consisting of
// - some containerized k3s nodes
// - a docker network
func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
if cluster.CreateClusterOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
@ -157,7 +157,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus

// create node
log.Infof("Creating node '%s'", node.Name)
if err := CreateNode(ctx, runtime, node, k3d.CreateNodeOpts{}); err != nil {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create node")
return err
}
@ -257,7 +257,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
return WaitForNodeLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], time.Time{})
return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], time.Time{})
})
}
}
@ -303,7 +303,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
}
cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
if err := CreateNode(ctx, runtime, lbNode, k3d.CreateNodeOpts{}); err != nil {
if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create loadbalancer")
return err
}
@ -312,7 +312,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
return WaitForNodeLogMessage(ctx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
return NodeWaitForLogMessage(ctx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
})
}
} else {
@ -329,8 +329,8 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
return nil
}

// DeleteCluster deletes an existing cluster
func DeleteCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
// ClusterDelete deletes an existing cluster
func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {

log.Infof("Deleting cluster '%s'", cluster.Name)
log.Debugf("Cluster Details: %+v", cluster)
@ -375,8 +375,8 @@ func DeleteCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
return nil
}

// GetClusters returns a list of all existing clusters
func GetClusters(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
// ClusterList returns a list of all existing clusters
func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
if err != nil {
log.Errorln("Failed to get clusters")
@ -455,8 +455,8 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error {
return nil
}

// GetCluster returns an existing cluster with all fields and node lists populated
func GetCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) (*k3d.Cluster, error) {
// ClusterGet returns an existing cluster with all fields and node lists populated
func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) (*k3d.Cluster, error) {
// get nodes that belong to the selected cluster
nodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name})
if err != nil {
@ -504,8 +504,8 @@ func generateNodeName(cluster string, role k3d.Role, suffix int) string {
return fmt.Sprintf("%s-%s-%s-%d", k3d.DefaultObjectNamePrefix, cluster, role, suffix)
}

// StartCluster starts a whole cluster (i.e. all nodes of the cluster)
func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.StartClusterOpts) error {
// ClusterStart starts a whole cluster (i.e. all nodes of the cluster)
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.ClusterStartOpts) error {
log.Infof("Starting cluster '%s'", cluster.Name)

start := time.Now()
@ -543,7 +543,7 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
return WaitForNodeLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], start)
return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], start)
})
}
}
@ -559,7 +559,7 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", masterlb.Name)
return WaitForNodeLogMessage(ctx, runtime, masterlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
return NodeWaitForLogMessage(ctx, runtime, masterlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
})
}

@ -575,8 +575,8 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
return nil
}

// StopCluster stops a whole cluster (i.e. all nodes of the cluster)
func StopCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
// ClusterStop stops a whole cluster (i.e. all nodes of the cluster)
func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
log.Infof("Stopping cluster '%s'", cluster.Name)

failed := 0
@ -44,21 +44,21 @@ type WriteKubeConfigOptions struct {
OverwriteExisting bool
}

// GetAndWriteKubeConfig ...
// KubeconfigGetWrite ...
// 1. fetches the KubeConfig from the first master node retrieved for a given cluster
// 2. modifies it by updating some fields with cluster-specific information
// 3. writes it to the specified output
func GetAndWriteKubeConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {
func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {

// get kubeconfig from cluster node
kubeconfig, err := GetKubeconfig(ctx, runtime, cluster)
kubeconfig, err := KubeconfigGet(ctx, runtime, cluster)
if err != nil {
return output, err
}

// empty output parameter = write to default
if output == "" {
output, err = GetDefaultKubeConfigPath()
output, err = KubeconfigGetDefaultPath()
if err != nil {
return output, err
}
@ -66,7 +66,7 @@ func GetAndWriteKubeConfig(ctx context.Context, runtime runtimes.Runtime, cluste

// simply write to the output, ignoring existing contents
if writeKubeConfigOptions.OverwriteExisting || output == "-" {
return output, WriteKubeConfigToPath(ctx, kubeconfig, output)
return output, KubeconfigWriteToPath(ctx, kubeconfig, output)
}

// load config from existing file or fail if it has non-kubeconfig contents
@ -103,14 +103,14 @@ func GetAndWriteKubeConfig(ctx context.Context, runtime runtimes.Runtime, cluste
}

// update existing kubeconfig, but error out if there are conflicting fields but we don't want to update them
return output, UpdateKubeConfig(ctx, kubeconfig, existingKubeConfig, output, writeKubeConfigOptions.UpdateExisting, writeKubeConfigOptions.UpdateCurrentContext)
return output, KubeconfigMerge(ctx, kubeconfig, existingKubeConfig, output, writeKubeConfigOptions.UpdateExisting, writeKubeConfigOptions.UpdateCurrentContext)

}

// GetKubeconfig grabs the kubeconfig file from /output from a master node container,
// KubeconfigGet grabs the kubeconfig file from /output from a master node container,
// modifies it by updating some fields with cluster-specific information
// and returns a Config object for further processing
func GetKubeconfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
// get all master nodes for the selected cluster
// TODO: getKubeconfig: we should make sure, that the master node we're trying to fetch from is actually running
masterNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.MasterRole)})
@ -199,8 +199,8 @@ func GetKubeconfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
return kc, nil
}

// WriteKubeConfigToPath takes a kubeconfig and writes it to some path, which can be '-' for os.Stdout
func WriteKubeConfigToPath(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
// KubeconfigWriteToPath takes a kubeconfig and writes it to some path, which can be '-' for os.Stdout
func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
var output *os.File
defer output.Close()
var err error
@ -234,8 +234,8 @@ func WriteKubeConfigToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,

}

// UpdateKubeConfig merges a new kubeconfig into an existing kubeconfig and returns the result
func UpdateKubeConfig(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {
// KubeconfigMerge merges a new kubeconfig into an existing kubeconfig and returns the result
func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {

log.Debugf("Merging new KubeConfig:\n%+v\n>>> into existing KubeConfig:\n%+v", newKubeConfig, existingKubeConfig)

@ -278,11 +278,11 @@ func UpdateKubeConfig(ctx context.Context, newKubeConfig *clientcmdapi.Config, e

log.Debugf("Merged KubeConfig:\n%+v", existingKubeConfig)

return WriteKubeConfig(ctx, existingKubeConfig, outPath)
return KubeconfigWrite(ctx, existingKubeConfig, outPath)
}

// WriteKubeConfig writes a kubeconfig to a path atomically
func WriteKubeConfig(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
// KubeconfigWrite writes a kubeconfig to a path atomically
func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
tempPath := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
if err := clientcmd.WriteToFile(*kubeconfig, tempPath); err != nil {
log.Errorf("Failed to write merged kubeconfig to temporary file '%s'", tempPath)
@ -300,9 +300,9 @@ func WriteKubeConfig(ctx context.Context, kubeconfig *clientcmdapi.Config, path
return nil
}

// GetDefaultKubeConfig loads the default KubeConfig file
func GetDefaultKubeConfig() (*clientcmdapi.Config, error) {
path, err := GetDefaultKubeConfigPath()
// KubeconfigGetDefaultFile loads the default KubeConfig file
func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
path, err := KubeconfigGetDefaultPath()
if err != nil {
return nil, err
}
@ -310,8 +310,8 @@ func GetDefaultKubeConfig() (*clientcmdapi.Config, error) {
return clientcmd.LoadFromFile(path)
}

// GetDefaultKubeConfigPath returns the path of the default kubeconfig, but errors if the KUBECONFIG env var specifies more than one file
func GetDefaultKubeConfigPath() (string, error) {
// KubeconfigGetDefaultPath returns the path of the default kubeconfig, but errors if the KUBECONFIG env var specifies more than one file
func KubeconfigGetDefaultPath() (string, error) {
defaultKubeConfigLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
if len(defaultKubeConfigLoadingRules.GetLoadingPrecedence()) > 1 {
return "", fmt.Errorf("Multiple kubeconfigs specified via KUBECONFIG env var: Please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
@ -319,22 +319,22 @@ func GetDefaultKubeConfigPath() (string, error) {
return defaultKubeConfigLoadingRules.GetDefaultFilename(), nil
}

// RemoveClusterFromDefaultKubeConfig removes a cluster's details from the default kubeconfig
func RemoveClusterFromDefaultKubeConfig(ctx context.Context, cluster *k3d.Cluster) error {
defaultKubeConfigPath, err := GetDefaultKubeConfigPath()
// KubeconfigRemoveClusterFromDefaultConfig removes a cluster's details from the default kubeconfig
func KubeconfigRemoveClusterFromDefaultConfig(ctx context.Context, cluster *k3d.Cluster) error {
defaultKubeConfigPath, err := KubeconfigGetDefaultPath()
if err != nil {
return err
}
kubeconfig, err := GetDefaultKubeConfig()
kubeconfig, err := KubeconfigGetDefaultFile()
if err != nil {
return err
}
kubeconfig = RemoveClusterFromKubeConfig(ctx, cluster, kubeconfig)
return WriteKubeConfig(ctx, kubeconfig, defaultKubeConfigPath)
kubeconfig = KubeconfigRemoveCluster(ctx, cluster, kubeconfig)
return KubeconfigWrite(ctx, kubeconfig, defaultKubeConfigPath)
}

// RemoveClusterFromKubeConfig removes a cluster's details from a given kubeconfig
func RemoveClusterFromKubeConfig(ctx context.Context, cluster *k3d.Cluster, kubeconfig *clientcmdapi.Config) *clientcmdapi.Config {
// KubeconfigRemoveCluster removes a cluster's details from a given kubeconfig
func KubeconfigRemoveCluster(ctx context.Context, cluster *k3d.Cluster, kubeconfig *clientcmdapi.Config) *clientcmdapi.Config {
clusterName := fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
contextName := fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
authInfoName := fmt.Sprintf("admin@%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
@ -36,7 +36,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu

var err error
// update cluster details to ensure that we have the latest node list
cluster, err = GetCluster(ctx, runtime, cluster)
cluster, err = ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to update details for cluster '%s'", cluster.Name)
return err
@ -36,10 +36,10 @@ import (
"golang.org/x/sync/errgroup"
)

// AddNodeToCluster adds a node to an existing cluster
func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.CreateNodeOpts) error {
// NodeAddToCluster adds a node to an existing cluster
func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
targetClusterName := cluster.Name
cluster, err := GetCluster(ctx, runtime, cluster)
cluster, err := ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find specified cluster '%s'", targetClusterName)
return err
@ -77,7 +77,7 @@ func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
}

// get node details
chosenNode, err = GetNode(ctx, runtime, chosenNode)
chosenNode, err = NodeGet(ctx, runtime, chosenNode)
if err != nil {
return err
}
@ -128,7 +128,7 @@ func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
}
}

if err := CreateNode(ctx, runtime, node, k3d.CreateNodeOpts{}); err != nil {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
return err
}

@ -143,8 +143,8 @@ func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
return nil
}

// AddNodesToCluster adds multiple nodes to a chosen cluster
func AddNodesToCluster(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.CreateNodeOpts) error {
// NodeAddToClusterMulti adds multiple nodes to a chosen cluster
func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
if createNodeOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
@ -153,14 +153,14 @@ func AddNodesToCluster(ctx context.Context, runtime runtimes.Runtime, nodes []*k

nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
if err := AddNodeToCluster(ctx, runtime, node, cluster, k3d.CreateNodeOpts{}); err != nil {
if err := NodeAddToCluster(ctx, runtime, node, cluster, k3d.NodeCreateOpts{}); err != nil {
return err
}
if createNodeOpts.Wait {
currentNode := node
nodeWaitGroup.Go(func() error {
log.Debugf("Starting to wait for node '%s'", currentNode.Name)
return WaitForNodeLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
return NodeWaitForLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
})
}
}
@ -173,8 +173,8 @@ func AddNodesToCluster(ctx context.Context, runtime runtimes.Runtime, nodes []*k
return nil
}

// CreateNodes creates a list of nodes
func CreateNodes(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, createNodeOpts k3d.CreateNodeOpts) error { // TODO: pass `--atomic` flag, so we stop and return an error if any node creation fails?
// NodeCreateMulti creates a list of nodes
func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, createNodeOpts k3d.NodeCreateOpts) error { // TODO: pass `--atomic` flag, so we stop and return an error if any node creation fails?
if createNodeOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
@ -183,14 +183,14 @@ func CreateNodes(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Nod

nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
if err := CreateNode(ctx, runtime, node, k3d.CreateNodeOpts{}); err != nil {
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Error(err)
}
if createNodeOpts.Wait {
currentNode := node
nodeWaitGroup.Go(func() error {
log.Debugf("Starting to wait for node '%s'", currentNode.Name)
return WaitForNodeLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
return NodeWaitForLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
})
}
}
@ -205,8 +205,8 @@ func CreateNodes(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Nod

}

// CreateNode creates a new containerized k3s node
func CreateNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.CreateNodeOpts) error {
// NodeCreate creates a new containerized k3s node
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
log.Debugf("Creating node from spec\n%+v", node)

/*
@ -251,14 +251,14 @@ func CreateNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
return nil
}

// DeleteNode deletes an existing node
func DeleteNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) error {
// NodeDelete deletes an existing node
func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) error {

if err := runtime.DeleteNode(ctx, node); err != nil {
log.Error(err)
}

cluster, err := GetCluster(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]})
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]})
if err != nil {
log.Errorf("Failed to update loadbalancer: Failed to find cluster for node '%s'", node.Name)
return err
@ -302,8 +302,8 @@ func patchMasterSpec(node *k3d.Node) error {
return nil
}

// GetNodes returns a list of all existing clusters
func GetNodes(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
// NodeList returns a list of all existing nodes
func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
if err != nil {
log.Errorln("Failed to get nodes")
@ -313,8 +313,8 @@ func GetNodes(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error
return nodes, nil
}

// GetNode returns a node matching the specified node fields
func GetNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3d.Node, error) {
// NodeGet returns a node matching the specified node fields
func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3d.Node, error) {
// get node
node, err := runtime.GetNode(ctx, node)
if err != nil {
@ -324,8 +324,8 @@ func GetNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
return node, nil
}

// WaitForNodeLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
// NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
for {
select {
case <-ctx.Done():
@ -37,9 +37,9 @@ import (
log "github.com/sirupsen/logrus"
)

// LoadImagesIntoCluster starts up a k3d tools container for the selected cluster and uses it to export
// ImageImportIntoClusterMulti starts up a k3d tools container for the selected cluster and uses it to export
// images from the runtime to import them into the nodes of the selected cluster
func LoadImagesIntoCluster(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, loadImageOpts k3d.LoadImageOpts) error {
func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, loadImageOpts k3d.ImageImportOpts) error {

var imagesFromRuntime []string
var imagesFromTar []string
@ -79,7 +79,7 @@ func LoadImagesIntoCluster(ctx context.Context, runtime runtimes.Runtime, images
return fmt.Errorf("No valid images specified")
}

cluster, err = k3dc.GetCluster(ctx, runtime, cluster)
cluster, err = k3dc.ClusterGet(ctx, runtime, cluster)
if err != nil {
log.Errorf("Failed to find the specified cluster")
return err
@ -128,8 +128,8 @@ var DoNotCopyMasterFlags = []string{
"--cluster-init",
}

// CreateClusterOpts describe a set of options one can set when creating a cluster
type CreateClusterOpts struct {
// ClusterCreateOpts describe a set of options one can set when creating a cluster
type ClusterCreateOpts struct {
DisableImageVolume bool
WaitForMaster bool
Timeout time.Duration
@ -138,26 +138,26 @@ type CreateClusterOpts struct {
K3sAgentArgs []string
}

// StartClusterOpts describe a set of options one can set when (re-)starting a cluster
type StartClusterOpts struct {
// ClusterStartOpts describe a set of options one can set when (re-)starting a cluster
type ClusterStartOpts struct {
WaitForMaster bool
Timeout time.Duration
}

// CreateNodeOpts describes a set of options one can set when creating a new node
type CreateNodeOpts struct {
// NodeCreateOpts describes a set of options one can set when creating a new node
type NodeCreateOpts struct {
Wait bool
Timeout time.Duration
}

// StartNodeOpts describes a set of options one can set when (re-)starting a node
type StartNodeOpts struct {
// NodeStartOpts describes a set of options one can set when (re-)starting a node
type NodeStartOpts struct {
Wait bool
Timeout time.Duration
}

// LoadImageOpts describes a set of options one can set for loading image(s) into cluster(s)
type LoadImageOpts struct {
// ImageImportOpts describes a set of options one can set for loading image(s) into cluster(s)
type ImageImportOpts struct {
KeepTar bool
}

@ -175,7 +175,7 @@ type Cluster struct {
Nodes []*Node `yaml:"nodes" json:"nodes,omitempty"`
InitNode *Node // init master node
ExternalDatastore ExternalDatastore `yaml:"external_datastore" json:"externalDatastore,omitempty"`
CreateClusterOpts *CreateClusterOpts `yaml:"options" json:"options,omitempty"`
CreateClusterOpts *ClusterCreateOpts `yaml:"options" json:"options,omitempty"`
ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
MasterLoadBalancer *Node `yaml:"master_loadbalancer" json:"masterLoadBalancer,omitempty"`
ImageVolume string `yaml:"image_volume" json:"imageVolume,omitempty"`
@ -61,7 +61,7 @@ check_url() {
check_clusters() {
[ -n "$EXE" ] || abort "EXE is not defined"
for c in "$@" ; do
$EXE get kubeconfig "$c" --merge-default-kubeconfig --switch-context
$EXE kubeconfig merge "$c" --switch-context
if kubectl cluster-info ; then
passed "cluster $c is reachable"
else
@ -75,7 +75,7 @@ check_clusters() {

check_cluster_count() {
expectedClusterCount=$1
actualClusterCount=$($EXE get clusters --no-headers | wc -l)
actualClusterCount=$($EXE cluster list --no-headers | wc -l)
if [[ $actualClusterCount != $expectedClusterCount ]]; then
failed "incorrect number of clusters available: $actualClusterCount != $expectedClusterCount"
return 1
@ -87,7 +87,7 @@ check_cluster_count() {
check_multi_node() {
cluster=$1
expectedNodeCount=$2
$EXE get kubeconfig "$cluster" --merge-default-kubeconfig --switch-context
$EXE kubeconfig merge "$cluster" --switch-context
nodeCount=$(kubectl get nodes -o=custom-columns=NAME:.metadata.name --no-headers | wc -l)
if [[ $nodeCount == $expectedNodeCount ]]; then
passed "cluster $cluster has $expectedNodeCount nodes, as expected"
@ -110,5 +110,5 @@ check_volume_exists() {

check_cluster_token_exist() {
[ -n "$EXE" ] || abort "EXE is not defined"
$EXE get cluster "$1" --token | grep "TOKEN" >/dev/null 2>&1
$EXE cluster get "$1" --token | grep "TOKEN" >/dev/null 2>&1
}
@ -7,8 +7,8 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source "$CURR_DIR/common.sh"

info "Creating two clusters..."
$EXE create cluster c1 --wait --timeout 60s --api-port 6443 || failed "could not create cluster c1"
$EXE create cluster c2 --wait --timeout 60s || failed "could not create cluster c2"
$EXE cluster create c1 --wait --timeout 60s --api-port 6443 || failed "could not create cluster c1"
$EXE cluster create c2 --wait --timeout 60s || failed "could not create cluster c2"

info "Checking that we can get both clusters..."
check_cluster_count 2
@ -21,8 +21,8 @@ check_cluster_token_exist "c1" || failed "could not find cluster token c1"
check_cluster_token_exist "c2" || failed "could not find cluster token c2"

info "Deleting clusters..."
$EXE delete cluster c1 || failed "could not delete the cluster c1"
$EXE delete cluster c2 || failed "could not delete the cluster c2"
$EXE cluster delete c1 || failed "could not delete the cluster c1"
$EXE cluster delete c2 || failed "could not delete the cluster c2"

exit 0

@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh"
clustername="lifecycletest"

info "Creating cluster $clustername..."
$EXE create cluster "$clustername" --workers 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"
$EXE cluster create "$clustername" --workers 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5
@ -23,14 +23,14 @@ check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"

# 2. stop the cluster
info "Stopping cluster..."
$EXE stop cluster "$clustername"
$EXE cluster stop "$clustername"

info "Checking that cluster was stopped"
check_clusters "$clustername" && failed "cluster was not stopped, since we still have access"

# 3. start the cluster
info "Starting cluster..."
$EXE start cluster "$clustername" --wait --timeout 360s || failed "cluster didn't come back in time"
$EXE cluster start "$clustername" --wait --timeout 360s || failed "cluster didn't come back in time"

info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"
@ -40,7 +40,7 @@ check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"

# 4. adding another worker node
info "Adding one worker node..."
$EXE create node "extra-worker" --cluster "$clustername" --role "worker" --wait --timeout 360s || failed "failed to add worker node"
$EXE node create "extra-worker" --cluster "$clustername" --role "worker" --wait --timeout 360s || failed "failed to add worker node"

info "Checking that we have 3 nodes available now..."
check_multi_node "$clustername" 3 || failed "failed to verify number of nodes"
@ -49,12 +49,12 @@ check_multi_node "$clustername" 3 || failed "failed to verify number of nodes"
info "Loading an image into the cluster..."
docker pull nginx:latest > /dev/null
docker tag nginx:latest nginx:local > /dev/null
$EXE load image nginx:local -c $clustername || failed "could not import image in $clustername"
$EXE image import nginx:local -c $clustername || failed "could not import image in $clustername"

# Cleanup

info "Deleting cluster $clustername..."
$EXE delete cluster "$clustername" || failed "could not delete the cluster $clustername"
$EXE cluster delete "$clustername" || failed "could not delete the cluster $clustername"

exit 0

@ -7,7 +7,7 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source "$CURR_DIR/common.sh"

info "Creating cluster multimaster..."
$EXE create cluster "multimaster" --masters 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multimaster"
$EXE cluster create "multimaster" --masters 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multimaster"

info "Checking that we have access to the cluster..."
check_clusters "multimaster" || failed "error checking cluster"
@ -19,7 +19,7 @@ info "Checking that we have 3 master nodes online..."
check_multi_node "multimaster" 3 || failed "failed to verify number of nodes"

info "Deleting cluster multimaster..."
$EXE delete cluster "multimaster" || failed "could not delete the cluster multimaster"
$EXE cluster delete "multimaster" || failed "could not delete the cluster multimaster"

exit 0

@ -31,6 +31,9 @@ import (
// Version is the string that contains version
var Version string

// HelperVersionOverride decouples the k3d helper image versions from the main version, if needed
var HelperVersionOverride string

// K3sVersion should contain the latest version tag of k3s (hardcoded at build time)
// we're setting a default version for edge cases, because the 'latest' tag is not actively maintained
var K3sVersion = "v1.18.4+k3s1" // TODO: can we try to dynamically fetch the latest version at runtime and only fallback to this if it fails?
@ -49,6 +52,9 @@ func GetHelperImageVersion() string {
log.Infoln("Helper image tag set from env var")
return tag
}
if len(HelperVersionOverride) > 0 {
return HelperVersionOverride
}
if len(Version) == 0 {
return "latest"
}