adjust toplevel pkg functions to new syntax
commit b02c783130
parent 96bdd9226c
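In short: every exported top-level function and option struct moves to a noun-first scheme (Cluster*, Node*, Kubeconfig*, Image*), so all operations on one kind of object sort and auto-complete together. A minimal before/after sketch from a caller's point of view; the import paths and names are assumptions for illustration, not part of this diff:

    package sketch

    import (
        "context"

        k3dCluster "github.com/rancher/k3d/pkg/cluster" // import path assumed
        "github.com/rancher/k3d/pkg/runtimes"
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func create(ctx context.Context, c *k3d.Cluster) error {
        // before: k3dCluster.CreateCluster(ctx, runtimes.SelectedRuntime, c)
        // after:  the verb moves behind the noun it operates on
        return k3dCluster.ClusterCreate(ctx, runtimes.SelectedRuntime, c)
    }

The hunks below apply that pattern file by file.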
@@ -51,7 +51,7 @@ Every cluster will consist of one or more containers:
 // NewCmdClusterCreate returns a new cobra command
 func NewCmdClusterCreate() *cobra.Command {

-    createClusterOpts := &k3d.CreateClusterOpts{}
+    createClusterOpts := &k3d.ClusterCreateOpts{}
     var updateKubeconfig, updateCurrentContext bool

     // create new command
@@ -65,7 +65,7 @@ func NewCmdClusterCreate() *cobra.Command {
             cluster := parseCreateClusterCmd(cmd, args, createClusterOpts)

             // check if a cluster with that name exists already
-            if _, err := k3dCluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, cluster); err == nil {
+            if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, cluster); err == nil {
                 log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", cluster.Name)
             }

@@ -74,11 +74,11 @@ func NewCmdClusterCreate() *cobra.Command {
                 log.Debugln("'--update-kubeconfig set: enabling wait-for-master")
                 cluster.CreateClusterOpts.WaitForMaster = true
             }
-            if err := k3dCluster.CreateCluster(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
+            if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
                 // rollback if creation failed
                 log.Errorln(err)
                 log.Errorln("Failed to create cluster >>> Rolling Back")
-                if err := k3dCluster.DeleteCluster(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
+                if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
                     log.Errorln(err)
                     log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
                 }
@@ -88,7 +88,7 @@ func NewCmdClusterCreate() *cobra.Command {

             if updateKubeconfig || updateCurrentContext {
                 log.Debugf("Updating default kubeconfig with a new context for cluster %s", cluster.Name)
-                if _, err := k3dCluster.GetAndWriteKubeConfig(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
+                if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
                     log.Fatalln(err)
                 }
             }
@@ -153,7 +153,7 @@ func NewCmdClusterCreate() *cobra.Command {
 }

 // parseCreateClusterCmd parses the command input into variables required to create a cluster
-func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.CreateClusterOpts) *k3d.Cluster {
+func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.ClusterCreateOpts) *k3d.Cluster {

     /********************************
      * Parse and validate arguments *
@@ -49,11 +49,11 @@ func NewCmdClusterDelete() *cobra.Command {
                 log.Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
-                    if err := cluster.DeleteCluster(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
+                    if err := cluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
                         log.Fatalln(err)
                     }
                     log.Infoln("Removing cluster details from default kubeconfig")
-                    if err := cluster.RemoveClusterFromDefaultKubeConfig(cmd.Context(), c); err != nil {
+                    if err := cluster.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
                         log.Warnln("Failed to remove cluster details from default kubeconfig")
                         log.Warnln(err)
                     }
@@ -83,7 +83,7 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+        clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -95,7 +95,7 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     }

     for _, name := range args {
-        cluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
+        cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
@@ -78,14 +78,14 @@ func buildClusterList(ctx context.Context, args []string) []*k3d.Cluster {

     if len(args) == 0 {
         // cluster name not specified : get all clusters
-        clusters, err = k3cluster.GetClusters(ctx, runtimes.SelectedRuntime)
+        clusters, err = k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
     } else {
         for _, clusterName := range args {
             // cluster name specified : get specific cluster
-            retrievedCluster, err := k3cluster.GetCluster(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
+            retrievedCluster, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
             if err != nil {
                 log.Fatalln(err)
             }
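The buildClusterList hunk shows the shape most callers now take: list everything when no names are given, otherwise fetch each named cluster. A sketch of the same logic with the renamed pair (import paths assumed, as in the sketch after the commit header):

    package sketch

    import (
        "context"

        k3cluster "github.com/rancher/k3d/pkg/cluster" // import path assumed
        "github.com/rancher/k3d/pkg/runtimes"
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func resolveClusters(ctx context.Context, names []string) ([]*k3d.Cluster, error) {
        if len(names) == 0 {
            // no name given: return every existing cluster
            return k3cluster.ClusterList(ctx, runtimes.SelectedRuntime)
        }
        clusters := []*k3d.Cluster{}
        for _, name := range names {
            // fetch one specific cluster by name
            c, err := k3cluster.ClusterGet(ctx, runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
            if err != nil {
                return nil, err
            }
            clusters = append(clusters, c)
        }
        return clusters, nil
    }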
@@ -38,7 +38,7 @@ import (
 // NewCmdClusterStart returns a new cobra command
 func NewCmdClusterStart() *cobra.Command {

-    startClusterOpts := types.StartClusterOpts{}
+    startClusterOpts := types.ClusterStartOpts{}

     // create new command
     cmd := &cobra.Command{
@@ -52,7 +52,7 @@ func NewCmdClusterStart() *cobra.Command {
                 log.Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
-                    if err := cluster.StartCluster(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
+                    if err := cluster.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
                         log.Fatalln(err)
                     }
                 }
@@ -79,7 +79,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+        clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -91,7 +91,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     }

     for _, name := range args {
-        cluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
+        cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
@@ -47,7 +47,7 @@ func NewCmdClusterStop() *cobra.Command {
                 log.Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
-                    if err := cluster.StopCluster(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
+                    if err := cluster.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
                         log.Fatalln(err)
                     }
                 }
@@ -72,7 +72,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+        clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -84,7 +84,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
     }

     for _, name := range args {
-        cluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
+        cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
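Together the two commands above form the stop/start lifecycle. A hedged sketch of a restart helper on top of the renamed functions (import paths and the timeout value are assumptions):

    package sketch

    import (
        "context"
        "time"

        "github.com/rancher/k3d/pkg/cluster" // import path assumed
        "github.com/rancher/k3d/pkg/runtimes"
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func restart(ctx context.Context, c *k3d.Cluster) error {
        if err := cluster.ClusterStop(ctx, runtimes.SelectedRuntime, c); err != nil {
            return err
        }
        // WaitForMaster blocks until the master's ready log line appears
        opts := k3d.ClusterStartOpts{WaitForMaster: true, Timeout: 2 * time.Minute}
        return cluster.ClusterStart(ctx, runtimes.SelectedRuntime, c, opts)
    }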
@@ -35,7 +35,7 @@ import (
 // NewCmdImageImport returns a new cobra command
 func NewCmdImageImport() *cobra.Command {

-    loadImageOpts := k3d.LoadImageOpts{}
+    loadImageOpts := k3d.ImageImportOpts{}

     // create new command
     cmd := &cobra.Command{
@@ -49,7 +49,7 @@ func NewCmdImageImport() *cobra.Command {
             log.Debugf("Load images [%+v] from runtime [%s] into clusters [%+v]", images, runtimes.SelectedRuntime, clusters)
             for _, cluster := range clusters {
                 log.Infof("Loading images into '%s'", cluster.Name)
-                if err := tools.LoadImagesIntoCluster(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
+                if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
                     log.Errorf("Failed to load images into cluster '%s'", cluster.Name)
                     log.Errorln(err)
                 }
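A sketch of calling the renamed import entrypoint directly; import paths and the image name are assumptions, and ImageImportOpts is defined in the types hunk near the end of this diff:

    package sketch

    import (
        "context"

        "github.com/rancher/k3d/pkg/runtimes"
        "github.com/rancher/k3d/pkg/tools" // import path assumed
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func load(ctx context.Context, c *k3d.Cluster) error {
        // KeepTar: true would keep the intermediate image tarball for inspection
        return tools.ImageImportIntoClusterMulti(ctx, runtimes.SelectedRuntime,
            []string{"nginx:alpine"}, c, k3d.ImageImportOpts{KeepTar: false})
    }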
@@ -67,13 +67,13 @@ func NewCmdKubeconfigGet() *cobra.Command {

             // generate list of clusters
             if getKubeconfigFlags.all {
-                clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+                clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
                 if err != nil {
                     log.Fatalln(err)
                 }
             } else {
                 for _, clusterName := range args {
-                    retrievedCluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
+                    retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
                     if err != nil {
                         log.Fatalln(err)
                     }
@@ -86,7 +86,7 @@ func NewCmdKubeconfigGet() *cobra.Command {
             for _, c := range clusters {
                 log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
                 fmt.Println("---") // YAML document separator
-                if _, err := cluster.GetAndWriteKubeConfig(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
+                if _, err := cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
                     log.Errorln(err)
                     errorGettingKubeconfig = true
                 }
@@ -66,13 +66,13 @@ func NewCmdKubeconfigMerge() *cobra.Command {

             // generate list of clusters
             if mergeKubeconfigFlags.all {
-                clusters, err = cluster.GetClusters(cmd.Context(), runtimes.SelectedRuntime)
+                clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
                 if err != nil {
                     log.Fatalln(err)
                 }
             } else {
                 for _, clusterName := range args {
-                    retrievedCluster, err := cluster.GetCluster(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
+                    retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
                     if err != nil {
                         log.Fatalln(err)
                     }
@@ -84,7 +84,7 @@ func NewCmdKubeconfigMerge() *cobra.Command {
             errorGettingKubeconfig := false
             for _, c := range clusters {
                 log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
-                if mergeKubeconfigFlags.output, err = cluster.GetAndWriteKubeConfig(cmd.Context(), runtimes.SelectedRuntime, c, mergeKubeconfigFlags.output, &writeKubeConfigOptions); err != nil {
+                if mergeKubeconfigFlags.output, err = cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, mergeKubeconfigFlags.output, &writeKubeConfigOptions); err != nil {
                     log.Errorln(err)
                     errorGettingKubeconfig = true
                 }
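Both kubeconfig commands funnel into KubeconfigGetWrite. A sketch of the call as the --update-kubeconfig path uses it, where an empty output argument stands for the default kubeconfig location (import paths assumed):

    package sketch

    import (
        "context"

        "github.com/rancher/k3d/pkg/cluster" // import path assumed
        "github.com/rancher/k3d/pkg/runtimes"
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func mergeIntoDefault(ctx context.Context, c *k3d.Cluster) (string, error) {
        opts := cluster.WriteKubeConfigOptions{
            UpdateExisting:       true,  // merge into an existing file
            OverwriteExisting:    false, // never clobber the whole file
            UpdateCurrentContext: true,  // point kubectl at the new context
        }
        // "" = resolve and write to the default kubeconfig path
        return cluster.KubeconfigGetWrite(ctx, runtimes.SelectedRuntime, c, "", &opts)
    }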
@@ -38,7 +38,7 @@ import (
 // NewCmdNodeCreate returns a new cobra command
 func NewCmdNodeCreate() *cobra.Command {

-    createNodeOpts := k3d.CreateNodeOpts{}
+    createNodeOpts := k3d.NodeCreateOpts{}

     // create new command
     cmd := &cobra.Command{
@@ -48,7 +48,7 @@ func NewCmdNodeCreate() *cobra.Command {
         Args: cobra.ExactArgs(1), // exactly one name accepted // TODO: if not specified, inherit from cluster that the node shall belong to, if that is specified
         Run: func(cmd *cobra.Command, args []string) {
             nodes, cluster := parseCreateNodeCmd(cmd, args)
-            if err := k3dc.AddNodesToCluster(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
+            if err := k3dc.NodeAddToClusterMulti(cmd.Context(), runtimes.SelectedRuntime, nodes, cluster, createNodeOpts); err != nil {
                 log.Errorf("Failed to add nodes to cluster '%s'", cluster.Name)
                 log.Errorln(err)
             }
@@ -48,7 +48,7 @@ func NewCmdNodeDelete() *cobra.Command {
                 log.Infoln("No nodes found")
             } else {
                 for _, node := range nodes {
-                    if err := cluster.DeleteNode(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
+                    if err := cluster.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
                         log.Fatalln(err)
                     }
                 }
@@ -74,7 +74,7 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
     if all, err := cmd.Flags().GetBool("all"); err != nil {
         log.Fatalln(err)
     } else if all {
-        nodes, err = cluster.GetNodes(cmd.Context(), runtimes.SelectedRuntime)
+        nodes, err = cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
         if err != nil {
             log.Fatalln(err)
         }
@@ -86,7 +86,7 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
     }

     for _, name := range args {
-        node, err := cluster.GetNode(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
+        node, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
         if err != nil {
             log.Fatalln(err)
         }
@@ -52,14 +52,14 @@ func NewCmdNodeList() *cobra.Command {
             nodes, headersOff := parseGetNodeCmd(cmd, args)
             var existingNodes []*k3d.Node
             if len(nodes) == 0 { // Option a) no name specified -> get all nodes
-                found, err := cluster.GetNodes(cmd.Context(), runtimes.SelectedRuntime)
+                found, err := cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
                 if err != nil {
                     log.Fatalln(err)
                 }
                 existingNodes = append(existingNodes, found...)
             } else { // Option b) cluster name specified -> get specific cluster
                 for _, node := range nodes {
-                    found, err := cluster.GetNode(cmd.Context(), runtimes.SelectedRuntime, node)
+                    found, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
                     if err != nil {
                         log.Fatalln(err)
                     }
@@ -37,7 +37,7 @@ func ValidArgsAvailableClusters(cmd *cobra.Command, args []string, toComplete st

     var completions []string
     var clusters []*k3d.Cluster
-    clusters, err := k3dcluster.GetClusters(context.Background(), runtimes.SelectedRuntime)
+    clusters, err := k3dcluster.ClusterList(context.Background(), runtimes.SelectedRuntime)
     if err != nil {
         log.Errorln("Failed to get list of clusters for shell completion")
         return nil, cobra.ShellCompDirectiveError
@@ -62,7 +62,7 @@ func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete strin

     var completions []string
     var nodes []*k3d.Node
-    nodes, err := k3dcluster.GetNodes(context.Background(), runtimes.SelectedRuntime)
+    nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
     if err != nil {
         log.Errorln("Failed to get list of nodes for shell completion")
         return nil, cobra.ShellCompDirectiveError
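These helpers keep the signature cobra expects for its ValidArgsFunction hook, so completion stays a one-field wiring job. A sketch (the util package path is an assumption):

    package sketch

    import (
        "github.com/rancher/k3d/cmd/util" // import path assumed
        "github.com/spf13/cobra"
    )

    func newStartCmd() *cobra.Command {
        return &cobra.Command{
            Use: "start CLUSTERNAME",
            // tab completion backed by ClusterList via the helper above
            ValidArgsFunction: util.ValidArgsAvailableClusters,
        }
    }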
@@ -40,10 +40,10 @@ import (
     "golang.org/x/sync/errgroup"
 )

-// CreateCluster creates a new cluster consisting of
+// ClusterCreate creates a new cluster consisting of
 // - some containerized k3s nodes
 // - a docker network
-func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
+func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
     if cluster.CreateClusterOpts.Timeout > 0*time.Second {
         var cancel context.CancelFunc
         ctx, cancel = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
@@ -157,7 +157,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus

         // create node
         log.Infof("Creating node '%s'", node.Name)
-        if err := CreateNode(ctx, runtime, node, k3d.CreateNodeOpts{}); err != nil {
+        if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
             log.Errorln("Failed to create node")
             return err
         }
@@ -257,7 +257,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
                 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
                 // ... by scanning for this line in logs and restarting the container in case it appears
                 log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
-                return WaitForNodeLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], time.Time{})
+                return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], time.Time{})
             })
         }
     }
@@ -303,7 +303,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
         }
         cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
         log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
-        if err := CreateNode(ctx, runtime, lbNode, k3d.CreateNodeOpts{}); err != nil {
+        if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
             log.Errorln("Failed to create loadbalancer")
             return err
         }
@@ -312,7 +312,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
                 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
                 // ... by scanning for this line in logs and restarting the container in case it appears
                 log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
-                return WaitForNodeLogMessage(ctx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
+                return NodeWaitForLogMessage(ctx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
             })
         }
     } else {
@@ -329,8 +329,8 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
     return nil
 }

-// DeleteCluster deletes an existing cluster
-func DeleteCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
+// ClusterDelete deletes an existing cluster
+func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {

     log.Infof("Deleting cluster '%s'", cluster.Name)
     log.Debugf("Cluster Details: %+v", cluster)
@@ -375,8 +375,8 @@ func DeleteCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
     return nil
 }

-// GetClusters returns a list of all existing clusters
-func GetClusters(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
+// ClusterList returns a list of all existing clusters
+func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
     nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
     if err != nil {
         log.Errorln("Failed to get clusters")
@@ -455,8 +455,8 @@ func populateClusterFieldsFromLabels(cluster *k3d.Cluster) error {
     return nil
 }

-// GetCluster returns an existing cluster with all fields and node lists populated
-func GetCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) (*k3d.Cluster, error) {
+// ClusterGet returns an existing cluster with all fields and node lists populated
+func ClusterGet(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) (*k3d.Cluster, error) {
     // get nodes that belong to the selected cluster
     nodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name})
     if err != nil {
@@ -504,8 +504,8 @@ func generateNodeName(cluster string, role k3d.Role, suffix int) string {
     return fmt.Sprintf("%s-%s-%s-%d", k3d.DefaultObjectNamePrefix, cluster, role, suffix)
 }

-// StartCluster starts a whole cluster (i.e. all nodes of the cluster)
-func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.StartClusterOpts) error {
+// ClusterStart starts a whole cluster (i.e. all nodes of the cluster)
+func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.ClusterStartOpts) error {
     log.Infof("Starting cluster '%s'", cluster.Name)

     start := time.Now()
@@ -543,7 +543,7 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
                 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
                 // ... by scanning for this line in logs and restarting the container in case it appears
                 log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
-                return WaitForNodeLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], start)
+                return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], start)
             })
         }
     }
@@ -559,7 +559,7 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
                 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
                 // ... by scanning for this line in logs and restarting the container in case it appears
                 log.Debugf("Starting to wait for loadbalancer node '%s'", masterlb.Name)
-                return WaitForNodeLogMessage(ctx, runtime, masterlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
+                return NodeWaitForLogMessage(ctx, runtime, masterlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
             })
         }

@@ -575,8 +575,8 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
     return nil
 }

-// StopCluster stops a whole cluster (i.e. all nodes of the cluster)
-func StopCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
+// ClusterStop stops a whole cluster (i.e. all nodes of the cluster)
+func ClusterStop(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
     log.Infof("Stopping cluster '%s'", cluster.Name)

     failed := 0
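A sketch of the create-or-rollback pattern the clusterCreate command earlier in this diff builds on top of ClusterCreate and ClusterDelete (import paths assumed):

    package sketch

    import (
        "context"
        "fmt"

        "github.com/rancher/k3d/pkg/cluster" // import path assumed
        "github.com/rancher/k3d/pkg/runtimes"
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func createOrRollback(ctx context.Context, c *k3d.Cluster) error {
        if err := cluster.ClusterCreate(ctx, runtimes.SelectedRuntime, c); err != nil {
            // tear down whatever was provisioned before the failure
            if delErr := cluster.ClusterDelete(ctx, runtimes.SelectedRuntime, c); delErr != nil {
                return fmt.Errorf("create failed (%v), rollback failed too (%v)", err, delErr)
            }
            return err
        }
        return nil
    }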
@@ -44,21 +44,21 @@ type WriteKubeConfigOptions struct {
     OverwriteExisting bool
 }

-// GetAndWriteKubeConfig ...
+// KubeconfigGetWrite ...
 // 1. fetches the KubeConfig from the first master node retrieved for a given cluster
 // 2. modifies it by updating some fields with cluster-specific information
 // 3. writes it to the specified output
-func GetAndWriteKubeConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {
+func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {

     // get kubeconfig from cluster node
-    kubeconfig, err := GetKubeconfig(ctx, runtime, cluster)
+    kubeconfig, err := KubeconfigGet(ctx, runtime, cluster)
     if err != nil {
         return output, err
     }

     // empty output parameter = write to default
     if output == "" {
-        output, err = GetDefaultKubeConfigPath()
+        output, err = KubeconfigGetDefaultPath()
         if err != nil {
             return output, err
         }
@@ -66,7 +66,7 @@ func GetAndWriteKubeConfig(ctx context.Context, runtime runtimes.Runtime, cluste

     // simply write to the output, ignoring existing contents
     if writeKubeConfigOptions.OverwriteExisting || output == "-" {
-        return output, WriteKubeConfigToPath(ctx, kubeconfig, output)
+        return output, KubeconfigWriteToPath(ctx, kubeconfig, output)
     }

     // load config from existing file or fail if it has non-kubeconfig contents
@@ -103,14 +103,14 @@ func GetAndWriteKubeConfig(ctx context.Context, runtime runtimes.Runtime, cluste
     }

     // update existing kubeconfig, but error out if there are conflicting fields but we don't want to update them
-    return output, UpdateKubeConfig(ctx, kubeconfig, existingKubeConfig, output, writeKubeConfigOptions.UpdateExisting, writeKubeConfigOptions.UpdateCurrentContext)
+    return output, KubeconfigMerge(ctx, kubeconfig, existingKubeConfig, output, writeKubeConfigOptions.UpdateExisting, writeKubeConfigOptions.UpdateCurrentContext)

 }

-// GetKubeconfig grabs the kubeconfig file from /output from a master node container,
+// KubeconfigGet grabs the kubeconfig file from /output from a master node container,
 // modifies it by updating some fields with cluster-specific information
 // and returns a Config object for further processing
-func GetKubeconfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
+func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
     // get all master nodes for the selected cluster
     // TODO: getKubeconfig: we should make sure, that the master node we're trying to fetch from is actually running
     masterNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.MasterRole)})
@@ -199,8 +199,8 @@ func GetKubeconfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.C
     return kc, nil
 }

-// WriteKubeConfigToPath takes a kubeconfig and writes it to some path, which can be '-' for os.Stdout
-func WriteKubeConfigToPath(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
+// KubeconfigWriteToPath takes a kubeconfig and writes it to some path, which can be '-' for os.Stdout
+func KubeconfigWriteToPath(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
     var output *os.File
     defer output.Close()
     var err error
@@ -234,8 +234,8 @@ func WriteKubeConfigToPath(ctx context.Context, kubeconfig *clientcmdapi.Config,

 }

-// UpdateKubeConfig merges a new kubeconfig into an existing kubeconfig and returns the result
-func UpdateKubeConfig(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {
+// KubeconfigMerge merges a new kubeconfig into an existing kubeconfig and returns the result
+func KubeconfigMerge(ctx context.Context, newKubeConfig *clientcmdapi.Config, existingKubeConfig *clientcmdapi.Config, outPath string, overwriteConflicting bool, updateCurrentContext bool) error {

     log.Debugf("Merging new KubeConfig:\n%+v\n>>> into existing KubeConfig:\n%+v", newKubeConfig, existingKubeConfig)

@@ -278,11 +278,11 @@ func UpdateKubeConfig(ctx context.Context, newKubeConfig *clientcmdapi.Config, e

     log.Debugf("Merged KubeConfig:\n%+v", existingKubeConfig)

-    return WriteKubeConfig(ctx, existingKubeConfig, outPath)
+    return KubeconfigWrite(ctx, existingKubeConfig, outPath)
 }

-// WriteKubeConfig writes a kubeconfig to a path atomically
-func WriteKubeConfig(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
+// KubeconfigWrite writes a kubeconfig to a path atomically
+func KubeconfigWrite(ctx context.Context, kubeconfig *clientcmdapi.Config, path string) error {
     tempPath := fmt.Sprintf("%s.k3d_%s", path, time.Now().Format("20060102_150405.000000"))
     if err := clientcmd.WriteToFile(*kubeconfig, tempPath); err != nil {
         log.Errorf("Failed to write merged kubeconfig to temporary file '%s'", tempPath)
@@ -300,9 +300,9 @@ func WriteKubeConfig(ctx context.Context, kubeconfig *clientcmdapi.Config, path
     return nil
 }

-// GetDefaultKubeConfig loads the default KubeConfig file
-func GetDefaultKubeConfig() (*clientcmdapi.Config, error) {
-    path, err := GetDefaultKubeConfigPath()
+// KubeconfigGetDefaultFile loads the default KubeConfig file
+func KubeconfigGetDefaultFile() (*clientcmdapi.Config, error) {
+    path, err := KubeconfigGetDefaultPath()
     if err != nil {
         return nil, err
     }
@@ -310,8 +310,8 @@ func GetDefaultKubeConfig() (*clientcmdapi.Config, error) {
     return clientcmd.LoadFromFile(path)
 }

-// GetDefaultKubeConfigPath returns the path of the default kubeconfig, but errors if the KUBECONFIG env var specifies more than one file
-func GetDefaultKubeConfigPath() (string, error) {
+// KubeconfigGetDefaultPath returns the path of the default kubeconfig, but errors if the KUBECONFIG env var specifies more than one file
+func KubeconfigGetDefaultPath() (string, error) {
     defaultKubeConfigLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
     if len(defaultKubeConfigLoadingRules.GetLoadingPrecedence()) > 1 {
         return "", fmt.Errorf("Multiple kubeconfigs specified via KUBECONFIG env var: Please reduce to one entry, unset KUBECONFIG or explicitly choose an output")
@@ -319,22 +319,22 @@ func GetDefaultKubeConfigPath() (string, error) {
     return defaultKubeConfigLoadingRules.GetDefaultFilename(), nil
 }

-// RemoveClusterFromDefaultKubeConfig removes a cluster's details from the default kubeconfig
-func RemoveClusterFromDefaultKubeConfig(ctx context.Context, cluster *k3d.Cluster) error {
-    defaultKubeConfigPath, err := GetDefaultKubeConfigPath()
+// KubeconfigRemoveClusterFromDefaultConfig removes a cluster's details from the default kubeconfig
+func KubeconfigRemoveClusterFromDefaultConfig(ctx context.Context, cluster *k3d.Cluster) error {
+    defaultKubeConfigPath, err := KubeconfigGetDefaultPath()
     if err != nil {
         return err
     }
-    kubeconfig, err := GetDefaultKubeConfig()
+    kubeconfig, err := KubeconfigGetDefaultFile()
     if err != nil {
         return err
     }
-    kubeconfig = RemoveClusterFromKubeConfig(ctx, cluster, kubeconfig)
-    return WriteKubeConfig(ctx, kubeconfig, defaultKubeConfigPath)
+    kubeconfig = KubeconfigRemoveCluster(ctx, cluster, kubeconfig)
+    return KubeconfigWrite(ctx, kubeconfig, defaultKubeConfigPath)
 }

-// RemoveClusterFromKubeConfig removes a cluster's details from a given kubeconfig
-func RemoveClusterFromKubeConfig(ctx context.Context, cluster *k3d.Cluster, kubeconfig *clientcmdapi.Config) *clientcmdapi.Config {
+// KubeconfigRemoveCluster removes a cluster's details from a given kubeconfig
+func KubeconfigRemoveCluster(ctx context.Context, cluster *k3d.Cluster, kubeconfig *clientcmdapi.Config) *clientcmdapi.Config {
     clusterName := fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
     contextName := fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
     authInfoName := fmt.Sprintf("admin@%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name)
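A sketch chaining the renamed fetch and write halves directly, dumping a cluster's kubeconfig to stdout via the "-" path KubeconfigWriteToPath documents (import paths assumed):

    package sketch

    import (
        "context"

        "github.com/rancher/k3d/pkg/cluster" // import path assumed
        "github.com/rancher/k3d/pkg/runtimes"
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func printKubeconfig(ctx context.Context, c *k3d.Cluster) error {
        cfg, err := cluster.KubeconfigGet(ctx, runtimes.SelectedRuntime, c)
        if err != nil {
            return err
        }
        return cluster.KubeconfigWriteToPath(ctx, cfg, "-") // "-" = os.Stdout
    }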
@@ -36,7 +36,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu

     var err error
     // update cluster details to ensure that we have the latest node list
-    cluster, err = GetCluster(ctx, runtime, cluster)
+    cluster, err = ClusterGet(ctx, runtime, cluster)
     if err != nil {
         log.Errorf("Failed to update details for cluster '%s'", cluster.Name)
         return err
@@ -36,10 +36,10 @@ import (
     "golang.org/x/sync/errgroup"
 )

-// AddNodeToCluster adds a node to an existing cluster
-func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.CreateNodeOpts) error {
+// NodeAddToCluster adds a node to an existing cluster
+func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
     targetClusterName := cluster.Name
-    cluster, err := GetCluster(ctx, runtime, cluster)
+    cluster, err := ClusterGet(ctx, runtime, cluster)
     if err != nil {
         log.Errorf("Failed to find specified cluster '%s'", targetClusterName)
         return err
@@ -77,7 +77,7 @@ func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
     }

     // get node details
-    chosenNode, err = GetNode(ctx, runtime, chosenNode)
+    chosenNode, err = NodeGet(ctx, runtime, chosenNode)
     if err != nil {
         return err
     }
@@ -128,7 +128,7 @@ func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
         }
     }

-    if err := CreateNode(ctx, runtime, node, k3d.CreateNodeOpts{}); err != nil {
+    if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
         return err
     }

@@ -143,8 +143,8 @@ func AddNodeToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
     return nil
 }

-// AddNodesToCluster adds multiple nodes to a chosen cluster
-func AddNodesToCluster(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.CreateNodeOpts) error {
+// NodeAddToClusterMulti adds multiple nodes to a chosen cluster
+func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
     if createNodeOpts.Timeout > 0*time.Second {
         var cancel context.CancelFunc
         ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
@@ -153,14 +153,14 @@ func AddNodesToCluster(ctx context.Context, runtime runtimes.Runtime, nodes []*k

     nodeWaitGroup, ctx := errgroup.WithContext(ctx)
     for _, node := range nodes {
-        if err := AddNodeToCluster(ctx, runtime, node, cluster, k3d.CreateNodeOpts{}); err != nil {
+        if err := NodeAddToCluster(ctx, runtime, node, cluster, k3d.NodeCreateOpts{}); err != nil {
             return err
         }
         if createNodeOpts.Wait {
             currentNode := node
             nodeWaitGroup.Go(func() error {
                 log.Debugf("Starting to wait for node '%s'", currentNode.Name)
-                return WaitForNodeLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
+                return NodeWaitForLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
             })
         }
     }
@@ -173,8 +173,8 @@ func AddNodesToCluster(ctx context.Context, runtime runtimes.Runtime, nodes []*k
     return nil
 }

-// CreateNodes creates a list of nodes
-func CreateNodes(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, createNodeOpts k3d.CreateNodeOpts) error { // TODO: pass `--atomic` flag, so we stop and return an error if any node creation fails?
+// NodeCreateMulti creates a list of nodes
+func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, createNodeOpts k3d.NodeCreateOpts) error { // TODO: pass `--atomic` flag, so we stop and return an error if any node creation fails?
     if createNodeOpts.Timeout > 0*time.Second {
         var cancel context.CancelFunc
         ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
@@ -183,14 +183,14 @@ func CreateNodes(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Nod

     nodeWaitGroup, ctx := errgroup.WithContext(ctx)
     for _, node := range nodes {
-        if err := CreateNode(ctx, runtime, node, k3d.CreateNodeOpts{}); err != nil {
+        if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
             log.Error(err)
         }
         if createNodeOpts.Wait {
             currentNode := node
             nodeWaitGroup.Go(func() error {
                 log.Debugf("Starting to wait for node '%s'", currentNode.Name)
-                return WaitForNodeLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
+                return NodeWaitForLogMessage(ctx, runtime, currentNode, k3d.ReadyLogMessageByRole[currentNode.Role], time.Time{})
             })
         }
     }
@@ -205,8 +205,8 @@ func CreateNodes(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Nod

 }

-// CreateNode creates a new containerized k3s node
-func CreateNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.CreateNodeOpts) error {
+// NodeCreate creates a new containerized k3s node
+func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
     log.Debugf("Creating node from spec\n%+v", node)

     /*
@@ -251,14 +251,14 @@ func CreateNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
     return nil
 }

-// DeleteNode deletes an existing node
-func DeleteNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) error {
+// NodeDelete deletes an existing node
+func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) error {

     if err := runtime.DeleteNode(ctx, node); err != nil {
         log.Error(err)
     }

-    cluster, err := GetCluster(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]})
+    cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]})
     if err != nil {
         log.Errorf("Failed to update loadbalancer: Failed to find cluster for node '%s'", node.Name)
         return err
@@ -302,8 +302,8 @@ func patchMasterSpec(node *k3d.Node) error {
     return nil
 }

-// GetNodes returns a list of all existing clusters
-func GetNodes(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
+// NodeList returns a list of all existing clusters
+func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
     nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
     if err != nil {
         log.Errorln("Failed to get nodes")
@@ -313,8 +313,8 @@ func GetNodes(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error
     return nodes, nil
 }

-// GetNode returns a node matching the specified node fields
-func GetNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3d.Node, error) {
+// NodeGet returns a node matching the specified node fields
+func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3d.Node, error) {
     // get node
     node, err := runtime.GetNode(ctx, node)
     if err != nil {
@@ -324,8 +324,8 @@ func GetNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
     return node, nil
 }

-// WaitForNodeLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
-func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
+// NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
+func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
     for {
         select {
         case <-ctx.Done():
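A sketch of batch node creation with the renamed helpers; Wait ties NodeCreateMulti to NodeWaitForLogMessage for every node. Import paths, node names and the worker role constant are assumptions:

    package sketch

    import (
        "context"
        "fmt"
        "time"

        "github.com/rancher/k3d/pkg/cluster" // import path assumed
        "github.com/rancher/k3d/pkg/runtimes"
        k3d "github.com/rancher/k3d/pkg/types"
    )

    func addWorkers(ctx context.Context, n int) error {
        nodes := []*k3d.Node{}
        for i := 0; i < n; i++ {
            nodes = append(nodes, &k3d.Node{
                Name: fmt.Sprintf("k3d-demo-worker-%d", i),
                Role: k3d.WorkerRole, // role constant assumed
            })
        }
        // Wait blocks until each node's ready log line shows up (or Timeout hits)
        return cluster.NodeCreateMulti(ctx, runtimes.SelectedRuntime, nodes,
            k3d.NodeCreateOpts{Wait: true, Timeout: time.Minute})
    }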
@@ -37,9 +37,9 @@ import (
     log "github.com/sirupsen/logrus"
 )

-// LoadImagesIntoCluster starts up a k3d tools container for the selected cluster and uses it to export
+// ImageImportIntoClusterMulti starts up a k3d tools container for the selected cluster and uses it to export
 // images from the runtime to import them into the nodes of the selected cluster
-func LoadImagesIntoCluster(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, loadImageOpts k3d.LoadImageOpts) error {
+func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime, images []string, cluster *k3d.Cluster, loadImageOpts k3d.ImageImportOpts) error {

     var imagesFromRuntime []string
     var imagesFromTar []string
@@ -79,7 +79,7 @@ func LoadImagesIntoCluster(ctx context.Context, runtime runtimes.Runtime, images
         return fmt.Errorf("No valid images specified")
     }

-    cluster, err = k3dc.GetCluster(ctx, runtime, cluster)
+    cluster, err = k3dc.ClusterGet(ctx, runtime, cluster)
     if err != nil {
         log.Errorf("Failed to find the specified cluster")
         return err
@@ -128,8 +128,8 @@ var DoNotCopyMasterFlags = []string{
     "--cluster-init",
 }

-// CreateClusterOpts describe a set of options one can set when creating a cluster
-type CreateClusterOpts struct {
+// ClusterCreateOpts describe a set of options one can set when creating a cluster
+type ClusterCreateOpts struct {
     DisableImageVolume bool
     WaitForMaster      bool
     Timeout            time.Duration
@@ -138,26 +138,26 @@ type CreateClusterOpts struct {
     K3sAgentArgs []string
 }

-// StartClusterOpts describe a set of options one can set when (re-)starting a cluster
-type StartClusterOpts struct {
+// ClusterStartOpts describe a set of options one can set when (re-)starting a cluster
+type ClusterStartOpts struct {
     WaitForMaster bool
     Timeout       time.Duration
 }

-// CreateNodeOpts describes a set of options one can set when creating a new node
-type CreateNodeOpts struct {
+// NodeCreateOpts describes a set of options one can set when creating a new node
+type NodeCreateOpts struct {
     Wait    bool
     Timeout time.Duration
 }

-// StartNodeOpts describes a set of options one can set when (re-)starting a node
-type StartNodeOpts struct {
+// NodeStartOpts describes a set of options one can set when (re-)starting a node
+type NodeStartOpts struct {
     Wait    bool
     Timeout time.Duration
 }

-// LoadImageOpts describes a set of options one can set for loading image(s) into cluster(s)
-type LoadImageOpts struct {
+// ImageImportOpts describes a set of options one can set for loading image(s) into cluster(s)
+type ImageImportOpts struct {
     KeepTar bool
 }

@@ -175,7 +175,7 @@ type Cluster struct {
     Nodes             []*Node           `yaml:"nodes" json:"nodes,omitempty"`
     InitNode          *Node             // init master node
     ExternalDatastore ExternalDatastore `yaml:"external_datastore" json:"externalDatastore,omitempty"`
-    CreateClusterOpts *CreateClusterOpts `yaml:"options" json:"options,omitempty"`
+    CreateClusterOpts *ClusterCreateOpts `yaml:"options" json:"options,omitempty"`
     ExposeAPI         ExposeAPI         `yaml:"expose_api" json:"exposeAPI,omitempty"`
     MasterLoadBalancer *Node            `yaml:"master_loadbalancer" json:"masterLoadBalancer,omitempty"`
     ImageVolume       string            `yaml:"image_volume" json:"imageVolume,omitempty"`
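A sketch instantiating the renamed option structs; the fields are exactly those shown above, the values are illustrative only (import path assumed):

    package sketch

    import (
        "time"

        k3d "github.com/rancher/k3d/pkg/types" // import path assumed
    )

    var (
        createOpts = k3d.ClusterCreateOpts{WaitForMaster: true, Timeout: 5 * time.Minute}
        startOpts  = k3d.ClusterStartOpts{WaitForMaster: true}
        nodeOpts   = k3d.NodeCreateOpts{Wait: true, Timeout: time.Minute}
        imageOpts  = k3d.ImageImportOpts{KeepTar: true}
    )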