Merge branch 'master' into enhancement/noun-verb-syntax
commit 84d6063e4b
@@ -52,7 +52,7 @@ Every cluster will consist of one or more containers:
 func NewCmdClusterCreate() *cobra.Command {
 
     createClusterOpts := &k3d.ClusterCreateOpts{}
-    var updateKubeconfig, updateCurrentContext bool
+    var updateDefaultKubeconfig, updateCurrentContext bool
 
     // create new command
     cmd := &cobra.Command{
@@ -70,8 +70,8 @@ func NewCmdClusterCreate() *cobra.Command {
         }
 
         // create cluster
-        if updateKubeconfig || updateCurrentContext {
-            log.Debugln("'--update-kubeconfig set: enabling wait-for-master")
+        if updateDefaultKubeconfig || updateCurrentContext {
+            log.Debugln("'--update-default-kubeconfig set: enabling wait-for-master")
             cluster.CreateClusterOpts.WaitForMaster = true
         }
         if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
@@ -86,16 +86,16 @@ func NewCmdClusterCreate() *cobra.Command {
         }
         log.Infof("Cluster '%s' created successfully!", cluster.Name)
 
-        if updateKubeconfig || updateCurrentContext {
+        if updateDefaultKubeconfig || updateCurrentContext {
             log.Debugf("Updating default kubeconfig with a new context for cluster %s", cluster.Name)
             if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
-                log.Fatalln(err)
+                log.Warningln(err)
             }
         }
 
         // print information on how to use the cluster with kubectl
         log.Infoln("You can now use it like this:")
-        if updateKubeconfig && !updateCurrentContext {
+        if updateDefaultKubeconfig && !updateCurrentContext {
             fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name))
         } else if !updateCurrentContext {
             if runtime.GOOS == "windows" {
@@ -121,8 +121,8 @@ func NewCmdClusterCreate() *cobra.Command {
     cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d create -w 2 -p 8080:80@worker[0] -p 8081@worker[1]`")
     cmd.Flags().BoolVar(&createClusterOpts.WaitForMaster, "wait", true, "Wait for the master(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
     cmd.Flags().DurationVar(&createClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
-    cmd.Flags().BoolVar(&updateKubeconfig, "update-kubeconfig", false, "Directly update the default kubeconfig with the new cluster's context")
-    cmd.Flags().BoolVar(&updateCurrentContext, "switch", false, "Directly switch the default kubeconfig's current-context to the new cluster's context (implies --update-kubeconfig)")
+    cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
+    cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (implies --update-default-kubeconfig)")
     cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the master nodes")
 
     /* Image Importing */

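The two renamed flags above now default to true, so a plain `k3d cluster create` updates the default kubeconfig and switches the current-context unless the user opts out. A minimal, self-contained sketch (using only the standard cobra API, not the k3d sources) of how such opt-out boolean flags behave:

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    var updateDefaultKubeconfig, switchContext bool

    cmd := &cobra.Command{
        Use: "create NAME",
        Run: func(cmd *cobra.Command, args []string) {
            // With the defaults below, both values are true unless the caller
            // passes e.g. --update-default-kubeconfig=false or --switch-context=false.
            fmt.Println("update kubeconfig:", updateDefaultKubeconfig, "switch context:", switchContext)
        },
    }

    cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
    cmd.Flags().BoolVar(&switchContext, "switch-context", true, "Switch the current-context to the new cluster's context")

    if err := cmd.Execute(); err != nil {
        fmt.Println(err)
    }
}
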
@@ -22,10 +22,15 @@ THE SOFTWARE.
 package cluster
 
 import (
+    "fmt"
+    "os"
+    "path"
+
     "github.com/rancher/k3d/v3/cmd/util"
     "github.com/rancher/k3d/v3/pkg/cluster"
     "github.com/rancher/k3d/v3/pkg/runtimes"
     k3d "github.com/rancher/k3d/v3/pkg/types"
+    k3dutil "github.com/rancher/k3d/v3/pkg/util"
     log "github.com/sirupsen/logrus"
 
     "github.com/spf13/cobra"
@@ -36,11 +41,11 @@ func NewCmdClusterDelete() *cobra.Command {
 
     // create new cobra command
     cmd := &cobra.Command{
-        Use:     "delete (NAME | --all)",
+        Use:     "delete [NAME [NAME ...] | --all]",
         Aliases: []string{"del", "rm"},
         Short:   "Delete cluster(s).",
         Long:    `Delete cluster(s).`,
-        Args:    cobra.MinimumNArgs(0), // 0 or n arguments; 0 only if --all is set
+        Args:    cobra.MinimumNArgs(0), // 0 or n arguments; 0 = default cluster name
         ValidArgsFunction: util.ValidArgsAvailableClusters,
         Run: func(cmd *cobra.Command, args []string) {
             clusters := parseDeleteClusterCmd(cmd, args)
@@ -52,11 +57,23 @@ func NewCmdClusterDelete() *cobra.Command {
             if err := cluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
                 log.Fatalln(err)
             }
-            log.Infoln("Removing cluster details from default kubeconfig")
+            log.Infoln("Removing cluster details from default kubeconfig...")
             if err := cluster.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
                 log.Warnln("Failed to remove cluster details from default kubeconfig")
                 log.Warnln(err)
             }
+            log.Infoln("Removing standalone kubeconfig file (if there is one)...")
+            configDir, err := k3dutil.GetConfigDirOrCreate()
+            if err != nil {
+                log.Warnf("Failed to delete kubeconfig file: %+v", err)
+            } else {
+                kubeconfigfile := path.Join(configDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
+                if err := os.Remove(kubeconfigfile); err != nil {
+                    if !os.IsNotExist(err) {
+                        log.Warnf("Failed to delete kubeconfig file '%s'", kubeconfigfile)
+                    }
+                }
+            }
 
             log.Infof("Successfully deleted cluster %s!", c.Name)
         }
@@ -90,11 +107,12 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
         return clusters
     }
 
-    if len(args) < 1 {
-        log.Fatalln("Expecting at least one cluster name if `--all` is not set")
+    clusternames := []string{k3d.DefaultClusterName}
+    if len(args) != 0 {
+        clusternames = args
     }
 
-    for _, name := range args {
+    for _, name := range clusternames {
         cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)

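The same arguments-or-default fallback added here for `delete` recurs verbatim in the `start` and `stop` parsers below. A hypothetical standalone helper (not in the k3d sources) capturing the pattern:

// clusterNamesOrDefault returns the positional arguments if any were given,
// otherwise it falls back to the default cluster name -- the new behaviour in
// this commit, replacing the old "at least one name or --all" error.
func clusterNamesOrDefault(args []string, defaultName string) []string {
    if len(args) == 0 {
        return []string{defaultName}
    }
    return args
}
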
@@ -42,9 +42,9 @@ func NewCmdClusterStart() *cobra.Command {
 
     // create new command
     cmd := &cobra.Command{
-        Use:   "start (NAME [NAME...] | --all)",
-        Short: "Start existing k3d cluster(s)",
+        Use:   "start [NAME [NAME...] | --all]",
         Long:  `Start existing k3d cluster(s)`,
+        Short: "Start existing k3d cluster(s)",
         ValidArgsFunction: util.ValidArgsAvailableClusters,
         Run: func(cmd *cobra.Command, args []string) {
             clusters := parseStartClusterCmd(cmd, args)
@@ -86,11 +86,12 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
         return clusters
     }
 
-    if len(args) < 1 {
-        log.Fatalln("Expecting at least one cluster name if `--all` is not set")
+    clusternames := []string{k3d.DefaultClusterName}
+    if len(args) != 0 {
+        clusternames = args
     }
 
-    for _, name := range args {
+    for _, name := range clusternames {
         cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)

@@ -37,9 +37,9 @@ func NewCmdClusterStop() *cobra.Command {
 
     // create new command
     cmd := &cobra.Command{
-        Use:   "stop (NAME [NAME...] | --all)",
-        Short: "Stop an existing k3d cluster",
-        Long:  `Stop an existing k3d cluster.`,
+        Use:   "stop [NAME [NAME...] | --all]",
+        Short: "Stop existing k3d cluster(s)",
+        Long:  `Stop existing k3d cluster(s).`,
         ValidArgsFunction: util.ValidArgsAvailableClusters,
         Run: func(cmd *cobra.Command, args []string) {
             clusters := parseStopClusterCmd(cmd, args)
@@ -79,11 +79,12 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
         return clusters
     }
 
-    if len(args) < 1 {
-        log.Fatalln("Expecting at least one cluster name if `--all` is not set")
+    clusternames := []string{k3d.DefaultClusterName}
+    if len(args) != 0 {
+        clusternames = args
     }
 
-    for _, name := range args {
+    for _, name := range clusternames {
         cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)

@@ -24,11 +24,14 @@ package kubeconfig
 import (
     "fmt"
     "os"
+    "path"
+    "strings"
 
     "github.com/rancher/k3d/v3/cmd/util"
     "github.com/rancher/k3d/v3/pkg/cluster"
     "github.com/rancher/k3d/v3/pkg/runtimes"
     k3d "github.com/rancher/k3d/v3/pkg/types"
+    k3dutil "github.com/rancher/k3d/v3/pkg/util"
     "github.com/spf13/cobra"
     "k8s.io/client-go/tools/clientcmd"
 
@@ -38,6 +41,7 @@ import (
 type mergeKubeconfigFlags struct {
     all    bool
     output string
+    targetDefault bool
 }
 
 // NewCmdKubeconfigMerge returns a new cobra command
@@ -54,16 +58,15 @@ func NewCmdKubeconfigMerge() *cobra.Command {
         Long:              `Merge/Write kubeconfig(s) from cluster(s) into existing kubeconfig/file.`,
         Short:             "Merge/Write kubeconfig(s) from cluster(s) into existing kubeconfig/file.",
         ValidArgsFunction: util.ValidArgsAvailableClusters,
-        Args: func(cmd *cobra.Command, args []string) error {
-            if (len(args) < 1 && !mergeKubeconfigFlags.all) || (len(args) > 0 && mergeKubeconfigFlags.all) {
-                return fmt.Errorf("Need to specify one or more cluster names *or* set `--all` flag")
-            }
-            return nil
-        },
+        Args: cobra.MinimumNArgs(0),
         Run: func(cmd *cobra.Command, args []string) {
             var clusters []*k3d.Cluster
             var err error
 
+            if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
+                log.Fatalln("Cannot use both '--output' and '--merge-default-kubeconfig' at the same time")
+            }
+
             // generate list of clusters
             if mergeKubeconfigFlags.all {
                 clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
@@ -71,7 +74,13 @@ func NewCmdKubeconfigMerge() *cobra.Command {
                     log.Fatalln(err)
                 }
             } else {
-                for _, clusterName := range args {
+                clusternames := []string{k3d.DefaultClusterName}
+                if len(args) != 0 {
+                    clusternames = args
+                }
+
+                for _, clusterName := range clusternames {
                     retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
                     if err != nil {
                         log.Fatalln(err)
@@ -82,17 +91,30 @@ func NewCmdKubeconfigMerge() *cobra.Command {
 
             // get kubeconfigs from all clusters
             errorGettingKubeconfig := false
+            var outputs []string
+            outputDir, err := k3dutil.GetConfigDirOrCreate()
+            if err != nil {
+                log.Errorln(err)
+                log.Fatalln("Failed to save kubeconfig to local directory")
+            }
             for _, c := range clusters {
                 log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
-                if mergeKubeconfigFlags.output, err = cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, mergeKubeconfigFlags.output, &writeKubeConfigOptions); err != nil {
+                output := mergeKubeconfigFlags.output
+                if output == "" && !mergeKubeconfigFlags.targetDefault {
+                    output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
+                }
+                output, err = cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
+                if err != nil {
                     log.Errorln(err)
                     errorGettingKubeconfig = true
+                } else {
+                    outputs = append(outputs, output)
                 }
             }
 
             // only print kubeconfig file path if output is not stdout ("-")
             if mergeKubeconfigFlags.output != "-" {
-                fmt.Println(mergeKubeconfigFlags.output)
+                fmt.Println(strings.Join(outputs, ":"))
             }
 
             // return with non-zero exit code, if there was an error for one of the clusters

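Printing the per-cluster output paths joined with ':' means the result can be fed straight into the KUBECONFIG environment variable, which accepts a colon-separated list of files. An illustrative sketch (not part of this commit) of consuming such a list with client-go, the same library imported above:

package main

import (
    "fmt"
    "os"
    "strings"

    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // e.g. KUBECONFIG=$HOME/.k3d/kubeconfig-cluster1.yaml:$HOME/.k3d/kubeconfig-cluster2.yaml
    paths := strings.Split(os.Getenv("KUBECONFIG"), ":")

    // Merge all listed files into a single in-memory config, as kubectl would.
    rules := clientcmd.ClientConfigLoadingRules{Precedence: paths}
    merged, err := rules.Load()
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("current-context:", merged.CurrentContext)
}
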
@@ -107,8 +129,9 @@ func NewCmdKubeconfigMerge() *cobra.Command {
     if err := cmd.MarkFlagFilename("output"); err != nil {
         log.Fatalln("Failed to mark flag --output as filename")
     }
-    cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing KubeConfig")
-    cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "switch", "s", false, "Switch to new context")
+    cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "merge-default-kubeconfig", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
+    cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
+    cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "switch-context", "s", true, "Switch to new context")
     cmd.Flags().BoolVar(&writeKubeConfigOptions.OverwriteExisting, "overwrite", false, "[Careful!] Overwrite existing file, ignoring its contents")
     cmd.Flags().BoolVarP(&mergeKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")
 
@@ -62,9 +62,6 @@ func NewCmdNodeCreate() *cobra.Command {
         log.Fatalln("Failed to register flag completion for '--role'", err)
     }
     cmd.Flags().StringP("cluster", "c", k3d.DefaultClusterName, "Select the cluster that the node shall connect to.")
-    if err := cmd.MarkFlagRequired("cluster"); err != nil {
-        log.Fatalln("Failed to mark required flag '--cluster'")
-    }
     if err := cmd.RegisterFlagCompletionFunc("cluster", util.ValidArgsAvailableClusters); err != nil {
         log.Fatalln("Failed to register flag completion for '--cluster'", err)
     }
@@ -1,12 +1,12 @@
 # Defaults
 
-* multiple master nodes
-* by default, when `--master` > 1 and no `--datastore-x` option is set, the first master node (master-0) will be the initializing master node
-* the initializing master node will have the `--cluster-init` flag appended
-* all other master nodes will refer to the initializing master node via `--server https://<init-node>:6443`
-* API-Ports
-* by default, we don't expose any API-Port (no host port mapping)
-* kubeconfig
-* if no output is set explicitly (via the `--output` flag), we use the default loading rules to get the default kubeconfig:
-* First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
-* Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
+- multiple master nodes
+- by default, when `--master` > 1 and no `--datastore-x` option is set, the first master node (master-0) will be the initializing master node
+- the initializing master node will have the `--cluster-init` flag appended
+- all other master nodes will refer to the initializing master node via `--server https://<init-node>:6443`
+- API-Ports
+- by default, we don't expose any API-Port (no host port mapping)
+- kubeconfig
+- if `--[update|merge]-default-kubeconfig` is set, we use the default loading rules to get the default kubeconfig:
+- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
+- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)

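The "default loading rules" mentioned in the kubeconfig bullets above are client-go's standard resolution order. A short illustrative sketch (not part of this commit) showing that KUBECONFIG takes precedence and the home-directory file is the fallback:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // NewDefaultClientConfigLoadingRules honours the KUBECONFIG environment
    // variable; with it unset, the precedence is just the recommended home file.
    rules := clientcmd.NewDefaultClientConfigLoadingRules()
    fmt.Println("kubeconfig precedence:", rules.GetLoadingPrecedence())
    fmt.Println("fallback file:", clientcmd.RecommendedHomeFile)
}
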
docs/static/css/extra.css (vendored)
@@ -20,6 +20,10 @@
   display: inline;
 }
 
+.md-header-nav__topic {
+  position: relative;
+}
+
 [data-md-color-primary=black] .md-tabs {
   background-color: #0DCEFF;
 }
@@ -20,8 +20,8 @@ k3d
   -p, --port # add some more port mappings
   --token # specify a cluster token (default: auto-generated)
   --timeout # specify a timeout, after which the cluster creation will be interrupted and changes rolled back
-  --update-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
-  --switch # (implies --update-kubeconfig) automatically sets the current-context of your default kubeconfig to the new cluster's context
+  --update-default-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
+  --switch-context # (implies --update-default-kubeconfig) automatically sets the current-context of your default kubeconfig to the new cluster's context
   -v, --volume # specify additional bind-mounts
   --wait # enable waiting for all master nodes to be ready before returning
   -w, --workers # specify how many worker nodes you want to create
|
|||||||
-a, --all # get kubeconfigs from all clusters
|
-a, --all # get kubeconfigs from all clusters
|
||||||
--output # specify the output file where the kubeconfig should be written to
|
--output # specify the output file where the kubeconfig should be written to
|
||||||
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents
|
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents
|
||||||
-s, --switch # switch current-context in kubeconfig to the new context
|
-s, --switch-context # switch current-context in kubeconfig to the new context
|
||||||
-u, --update # update conflicting fields in existing kubeconfig (default: true)
|
-u, --update # update conflicting fields in existing kubeconfig (default: true)
|
||||||
|
-d, --merge-default-kubeconfig # update the default kubeconfig (usually $KUBECONFIG or $HOME/.kube/config)
|
||||||
image
|
image
|
||||||
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
|
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
|
||||||
-c, --cluster # clusters to load the image into
|
-c, --cluster # clusters to load the image into
|
||||||
|
@@ -11,6 +11,7 @@ To get a kubeconfig set up for you to connect to a k3d cluster, you can go diffe
 
 ## Getting the kubeconfig for a newly created cluster
 
+<<<<<<< HEAD
 1. Update your default kubeconfig **upon** cluster creation
     - `#!bash k3d cluster create mycluster --update-kubeconfig`
     - *Note:* this won't switch the current-context
|
|||||||
- *Note:* this won't switch the current-context
|
- *Note:* this won't switch the current-context
|
||||||
3. Update a different kubeconfig **after** cluster creation
|
3. Update a different kubeconfig **after** cluster creation
|
||||||
- `#!bash k3d kubeconfig merge mycluster --output some/other/file.yaml`
|
- `#!bash k3d kubeconfig merge mycluster --output some/other/file.yaml`
|
||||||
|
=======
|
||||||
|
1. Create a new kubeconfig file **after** cluster creation
|
||||||
|
- `#!bash k3d kubeconfig get mycluster`
|
||||||
|
- *Note:* this will create (or update) the file `$HOME/.k3d/kubeconfig-mycluster.yaml`
|
||||||
|
- *Tip:* Use it: `#!bash export KUBECONFIG=$(k3d kubeconfig get mycluster)`
|
||||||
|
2. Update your default kubeconfig **upon** cluster creation
|
||||||
|
- `#!bash k3d cluster create mycluster --update-kubeconfig`
|
||||||
|
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
|
||||||
|
3. Update your default kubeconfig **after** cluster creation
|
||||||
|
- `#!bash k3d kubeconfig merge mycluster --merge-default-kubeconfig`
|
||||||
|
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
|
||||||
|
4. Update a different kubeconfig **after** cluster creation
|
||||||
|
- `#!bash k3d get kubeconfig mycluster --output some/other/file.yaml`
|
||||||
|
>>>>>>> master
|
||||||
- *Note:* this won't switch the current-context
|
- *Note:* this won't switch the current-context
|
||||||
- The file will be created if it doesn't exist
|
- The file will be created if it doesn't exist
|
||||||
|
|
||||||
!!! info "Switching the current context"
|
!!! info "Switching the current context"
|
||||||
None of the above options switch the current-context.
|
None of the above options switch the current-context by default.
|
||||||
This is intended to be least intrusive, since the current-context has a global effect.
|
This is intended to be least intrusive, since the current-context has a global effect.
|
||||||
|
<<<<<<< HEAD
|
||||||
You can switch the current-context directly with the `kubeconfig merge` command by adding the `--switch` flag.
|
You can switch the current-context directly with the `kubeconfig merge` command by adding the `--switch` flag.
|
||||||
|
|
||||||
## Removing cluster details from the kubeconfig
|
## Removing cluster details from the kubeconfig
|
||||||
@@ -36,3 +52,18 @@ To get a kubeconfig set up for you to connect to a k3d cluster, you can go diffe
 `k3s kubeconfig merge` let's you specify one or more clusters via arguments _or_ all via `--all`.
 All kubeconfigs will then be merged into a single file, which is either the default kubeconfig or the kubeconfig specified via `--output FILE`.
 Note, that with multiple cluster specified, the `--switch` flag will change the current context to the cluster which was last in the list.
+=======
+You can switch the current-context directly with the `get kubeconfig` command by adding the `--switch-context` flag.
+
+## Removing cluster details from the kubeconfig
+
+`#!bash k3d delete cluster mycluster` will always remove the details for `mycluster` from the default kubeconfig.
+It will also delete the respective kubeconfig file in `$HOME/.k3d/` if it exists.
+
+## Handling multiple clusters
+
+`k3d get kubeconfig` let's you specify one or more clusters via arguments _or_ all via `--all`.
+All kubeconfigs will then be merged into a single file if `--merge-default-kubeconfig` or `--output` is specified.
+If none of those two flags was specified, a new file will be created per cluster and the merged path (e.g. `$HOME/.k3d/kubeconfig-cluster1.yaml:$HOME/.k3d/cluster2.yaml`) will be returned.
+Note, that with multiple cluster specified, the `--switch-context` flag will change the current context to the cluster which was last in the list.
+>>>>>>> master
@@ -87,7 +87,11 @@ check_cluster_count() {
 check_multi_node() {
   cluster=$1
   expectedNodeCount=$2
+<<<<<<< HEAD
   $EXE kubeconfig merge "$cluster" --switch
+=======
+  $EXE get kubeconfig "$cluster" --merge-default-kubeconfig --switch-context
+>>>>>>> master
   nodeCount=$(kubectl get nodes -o=custom-columns=NAME:.metadata.name --no-headers | wc -l)
   if [[ $nodeCount == $expectedNodeCount ]]; then
     passed "cluster $cluster has $expectedNodeCount nodes, as expected"