changes when creating clusters + new nodefilter syntax

- generate node names when transforming from simple to cluster config
  - ClusterCreate(clusterconfig) should receive a ready-made config and not
  generate values on the fly
- ClusterCreate() only prep LB if not already present (to be removed)
- cluster struct: serverloadbalancer is now of type LoadBalancer (Node +
Config)
- use new nodefilter syntax with 'id:index:suffix' instead of
  'id[index]' everywhere (see the syntax example below)
  - use suffix when creating the LB
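
For illustration only (not part of the original commit message), the filter syntax changes roughly like this; the 'proxy' and 'direct' suffixes are the ones handled by the port-mapping code in the diff below:

    server[0]  ->  server:0
    server[*]  ->  server:*
    agent[1]   ->  agent:1
    agent:1:direct   (suffix 'direct': map the port on the matched node itself)
    server:0:proxy   (suffix 'proxy' or no suffix: route the port through the serverlb)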
iwilltry42 2021-06-30 08:29:13 +02:00 committed by Thorsten Klein
parent 9574002b38
commit d15ed26875
15 changed files with 182 additions and 85 deletions

View File

@@ -372,12 +372,12 @@ ClusterCreatOpts:
 clusterCreateOpts.GlobalLabels[k3d.LabelClusterName] = cluster.Name
 // agent defaults (per cluster)
-// connection url is always the name of the first server node (index 0)
-connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
+// connection url is always the name of the first server node (index 0) // TODO: change this to the server loadbalancer
+connectionURL := fmt.Sprintf("https://%s:%s", GenerateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
 clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL
 clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token))
-nodeSetup := func(node *k3d.Node, suffix int) error {
+nodeSetup := func(node *k3d.Node) error {
 // cluster specific settings
 if node.RuntimeLabels == nil {
 node.RuntimeLabels = make(map[string]string) // TODO: maybe create an init function?
@@ -417,7 +417,6 @@ ClusterCreatOpts:
 node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
 }
-node.Name = generateNodeName(cluster.Name, node.Role, suffix)
 node.Networks = []string{cluster.Network.Name}
 node.Restart = true
 node.GPURequest = clusterCreateOpts.GPURequest
@@ -437,8 +436,6 @@ ClusterCreatOpts:
 // used for node suffices
 serverCount := 0
-agentCount := 0
-suffix := 0
 // create init node first
 if cluster.InitNode != nil {
@@ -457,7 +454,7 @@ ClusterCreatOpts:
 cluster.InitNode.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
 }
-if err := nodeSetup(cluster.InitNode, serverCount); err != nil {
+if err := nodeSetup(cluster.InitNode); err != nil {
 return err
 }
 serverCount++
@@ -481,17 +478,11 @@ ClusterCreatOpts:
 time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering
-// name suffix
-suffix = serverCount
 serverCount++
-} else if node.Role == k3d.AgentRole {
-// name suffix
-suffix = agentCount
-agentCount++
 }
 if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
-if err := nodeSetup(node, suffix); err != nil {
+if err := nodeSetup(node); err != nil {
 return err
 }
 }
@@ -499,7 +490,7 @@ ClusterCreatOpts:
 // WARN, if there are exactly two server nodes: that means we're using etcd, but don't have fault tolerance
 if serverCount == 2 {
-log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve quorum & fault tolerance")
+log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
 }
 /*
@@ -507,19 +498,26 @@ ClusterCreatOpts:
 */
 // *** ServerLoadBalancer ***
 if !clusterCreateOpts.DisableLoadBalancer {
-lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels})
-if err != nil {
-return err
-}
-cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
-lbConfig, err := LoadbalancerGenerateConfig(cluster)
-if err != nil {
-return fmt.Errorf("error generating loadbalancer config: %v", err)
-}
+if cluster.ServerLoadBalancer == nil {
+lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels})
+if err != nil {
+return err
+}
+cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
+}
+if len(cluster.ServerLoadBalancer.Config.Ports) == 0 {
+lbConfig, err := LoadbalancerGenerateConfig(cluster)
+if err != nil {
+return fmt.Errorf("error generating loadbalancer config: %v", err)
+}
+cluster.ServerLoadBalancer.Config = lbConfig
+}
+cluster.ServerLoadBalancer.Node.RuntimeLabels = clusterCreateOpts.GlobalLabels
 // prepare to write config to lb container
-configyaml, err := yaml.Marshal(lbConfig)
+configyaml, err := yaml.Marshal(cluster.ServerLoadBalancer.Config)
 if err != nil {
 return err
 }
@@ -534,13 +532,13 @@ ClusterCreatOpts:
 },
 }
-lbNode.HookActions = append(lbNode.HookActions, writeLbConfigAction)
-log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
-if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
+cluster.ServerLoadBalancer.Node.HookActions = append(cluster.ServerLoadBalancer.Node.HookActions, writeLbConfigAction)
+log.Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
+if err := NodeCreate(ctx, runtime, cluster.ServerLoadBalancer.Node, k3d.NodeCreateOpts{}); err != nil {
 return fmt.Errorf("error creating loadbalancer: %v", err)
 }
-log.Debugf("Created loadbalancer '%s'", lbNode.Name)
+log.Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
 return err
 }
@@ -795,7 +793,7 @@ func GenerateClusterToken() string {
 return util.GenerateRandomString(20)
 }
-func generateNodeName(cluster string, role k3d.Role, suffix int) string {
+func GenerateNodeName(cluster string, role k3d.Role, suffix int) string {
 return fmt.Sprintf("%s-%s-%s-%d", k3d.DefaultObjectNamePrefix, cluster, role, suffix)
 }

View File

@@ -77,18 +77,18 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
 }
 log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
 startTime := time.Now().Truncate(time.Second).UTC()
-if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil {
+if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer.Node); err != nil {
 return fmt.Errorf("error writing new loadbalancer config to container: %w", err)
 }
 successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
 defer successCtxCancel()
-err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
+err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
 if err != nil {
 if errors.Is(err, context.DeadlineExceeded) {
 failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
 defer failureCtxCancel()
-err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer, "host not found in upstream", startTime)
+err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime)
 if err != nil {
 log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
 return LBConfigErrFailedTest
@@ -101,7 +101,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
 return LBConfigErrFailedTest
 }
 }
-log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Name)
+log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)
 time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits
@@ -116,7 +116,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
 for _, node := range cluster.Nodes {
 if node.Role == types.LoadBalancerRole {
 var err error
-cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node)
+cluster.ServerLoadBalancer.Node, err = NodeGet(ctx, runtime, node)
 if err != nil {
 return cfg, err
 }
@@ -124,7 +124,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
 }
 }
-reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer)
+reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node)
 if err != nil {
 return cfg, err
 }
@@ -162,31 +162,36 @@ func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, e
 lbConfig.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = servers
 // generate comma-separated list of extra ports to forward // TODO: no default targets?
-for exposedPort := range cluster.ServerLoadBalancer.Ports {
+for exposedPort := range cluster.ServerLoadBalancer.Node.Ports {
 // TODO: catch duplicates here?
 lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers
 }
 // some additional nginx settings
-lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers)
+lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers)
 return lbConfig, nil
 }
 func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) {
-if cluster.ServerLoadBalancer.Ports == nil {
-cluster.ServerLoadBalancer.Ports = nat.PortMap{}
-}
-cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
+labels := map[string]string{}
+if opts != nil && opts.Labels == nil && len(opts.Labels) == 0 {
+labels = opts.Labels
+}
+if cluster.ServerLoadBalancer.Node.Ports == nil {
+cluster.ServerLoadBalancer.Node.Ports = nat.PortMap{}
+}
+cluster.ServerLoadBalancer.Node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
 // Create LB as a modified node with loadbalancerRole
 lbNode := &k3d.Node{
 Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
 Image: k3d.GetLoadbalancerImage(),
-Ports: cluster.ServerLoadBalancer.Ports,
+Ports: cluster.ServerLoadBalancer.Node.Ports,
 Role: k3d.LoadBalancerRole,
-RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels
+RuntimeLabels: labels, // TODO: createLoadBalancer: add more expressive labels
 Networks: []string{cluster.Network.Name},
 Restart: true,
 }

View File

@@ -684,7 +684,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
 if err != nil {
 return fmt.Errorf("error updating loadbalancer config: %w", err)
 }
-cluster.ServerLoadBalancer = result
+cluster.ServerLoadBalancer.Node = result
 lbConfig, err := LoadbalancerGenerateConfig(cluster)
 if err != nil {
 return fmt.Errorf("error generating loadbalancer config: %v", err)

View File

@@ -81,13 +81,13 @@ func TestReadSimpleConfig(t *testing.T) {
 ExtraArgs: []conf.K3sArgWithNodeFilters{
 {
 Arg: "--tls-san=127.0.0.1",
-NodeFilters: []string{"server[*]"},
+NodeFilters: []string{"server:*"},
 },
 },
 NodeLabels: []conf.LabelWithNodeFilters{
 {
 Label: "foo=bar",
-NodeFilters: []string{"server[0]", "loadbalancer"},
+NodeFilters: []string{"server:0", "loadbalancer"},
 },
 },
 },
@@ -99,7 +99,7 @@ func TestReadSimpleConfig(t *testing.T) {
 Labels: []conf.LabelWithNodeFilters{
 {
 Label: "foo=bar",
-NodeFilters: []string{"server[0]", "loadbalancer"},
+NodeFilters: []string{"server:0", "loadbalancer"},
 },
 },
 },

View File

@@ -33,11 +33,11 @@ options:
 extraArgs:
 - arg: --tls-san=127.0.0.1
 nodeFilters:
-- "server[*]"
+- server:*
 nodeLabels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer
 kubeconfig:
 updateDefaultKubeconfig: true
@@ -46,5 +46,5 @@ options:
 labels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer

View File

@@ -33,11 +33,11 @@ options:
 extraArgs:
 - arg: --tls-san=127.0.0.1
 nodeFilters:
-- "server[*]"
+- "server:*"
 nodeLabels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer
 kubeconfig:
 updateDefaultKubeconfig: true
@@ -46,5 +46,5 @@ options:
 labels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer

View File

@@ -31,6 +31,7 @@ import (
 "github.com/docker/go-connections/nat"
 cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg
+"github.com/rancher/k3d/v4/pkg/client"
 conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
 "github.com/rancher/k3d/v4/pkg/runtimes"
 k3d "github.com/rancher/k3d/v4/pkg/types"
@@ -102,8 +103,11 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
 newCluster.Nodes = []*k3d.Node{}
 if !simpleConfig.Options.K3dOptions.DisableLoadbalancer {
-newCluster.ServerLoadBalancer = &k3d.Node{
-Role: k3d.LoadBalancerRole,
+newCluster.ServerLoadBalancer = k3d.NewLoadbalancer()
+var err error
+newCluster.ServerLoadBalancer.Node, err = client.LoadbalancerPrepare(ctx, runtime, &newCluster, nil)
+if err != nil {
+return nil, fmt.Errorf("error preparing the loadbalancer: %w", err)
 }
 } else {
 log.Debugln("Disabling the load balancer")
@@ -115,6 +119,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
 for i := 0; i < simpleConfig.Servers; i++ {
 serverNode := k3d.Node{
+Name: client.GenerateNodeName(newCluster.Name, k3d.ServerRole, i),
 Role: k3d.ServerRole,
 Image: simpleConfig.Image,
 ServerOpts: k3d.ServerOpts{},
@@ -132,6 +137,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
 for i := 0; i < simpleConfig.Agents; i++ {
 agentNode := k3d.Node{
+Name: client.GenerateNodeName(newCluster.Name, k3d.AgentRole, i),
 Role: k3d.AgentRole,
 Image: simpleConfig.Image,
 Memory: simpleConfig.Options.Runtime.AgentsMemory,
@@ -148,7 +154,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
 nodeList := newCluster.Nodes
 if !simpleConfig.Options.K3dOptions.DisableLoadbalancer {
 nodeCount++
-nodeList = append(nodeList, newCluster.ServerLoadBalancer)
+nodeList = append(nodeList, newCluster.ServerLoadBalancer.Node)
 }
 for _, volumeWithNodeFilters := range simpleConfig.Volumes {
 nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters)
@@ -167,27 +173,35 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
 return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port)
 }
-nodes, err := util.FilterNodes(nodeList, portWithNodeFilters.NodeFilters)
+x, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters)
 if err != nil {
 return nil, err
 }
-for _, node := range nodes {
+for suffix, nodes := range x {
 portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port)
 if err != nil {
-return nil, fmt.Errorf("Failed to parse port spec '%s': %+v", portWithNodeFilters.Port, err)
+return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err)
 }
-if node.Ports == nil {
-node.Ports = nat.PortMap{}
-}
-for _, pm := range portmappings {
-if _, exists := node.Ports[pm.Port]; exists {
-node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding)
-} else {
-node.Ports[pm.Port] = []nat.PortBinding{pm.Binding}
-}
-}
+if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings
+if newCluster.ServerLoadBalancer == nil {
+return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled")
+}
+if err := addPortMappings(newCluster.ServerLoadBalancer.Node, portmappings); err != nil {
+return nil, err
+}
+for _, pm := range portmappings {
+loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes)
+}
+} else if suffix == "direct" {
+for _, node := range nodes {
+if err := addPortMappings(node, portmappings); err != nil {
+return nil, err
+}
+}
+}
 }
 }
// -> K3S NODE LABELS // -> K3S NODE LABELS
@@ -358,3 +372,44 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
 return clusterConfig, nil
 }
+func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error {
+if node.Ports == nil {
+node.Ports = nat.PortMap{}
+}
+for _, pm := range portmappings {
+if _, exists := node.Ports[pm.Port]; exists {
+node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding)
+} else {
+node.Ports[pm.Port] = []nat.PortBinding{pm.Binding}
+}
+}
+return nil
+}
+func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMapping, nodes []*k3d.Node) error {
+portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto())
+nodenames := []string{}
+for _, node := range nodes {
+nodenames = append(nodenames, node.Name)
+}
+// entry for that port doesn't exist yet, so we simply create it with the list of node names
+if _, ok := loadbalancer.Config.Ports[portconfig]; !ok {
+loadbalancer.Config.Ports[portconfig] = nodenames
+return nil
+}
+nodenameLoop:
+for _, nodename := range nodenames {
+for _, existingNames := range loadbalancer.Config.Ports[portconfig] {
+if nodename == existingNames {
+continue nodenameLoop
+}
+loadbalancer.Config.Ports[portconfig] = append(loadbalancer.Config.Ports[portconfig], nodename)
+}
+}
+return nil
+}

View File

@@ -34,16 +34,17 @@ package types
 */
 type Loadbalancer struct {
-Node Node `mapstructure:",squash" yaml:",inline"` // the underlying node
+Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node
 Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration
 }
 func NewLoadbalancer() *Loadbalancer {
 return &Loadbalancer{
-Node: Node{
+Node: &Node{
 Role: LoadBalancerRole,
 Image: GetLoadbalancerImage(),
 },
+Config: LoadbalancerConfig{Ports: map[string][]string{}},
 }
 }

View File

@@ -262,7 +262,7 @@ type Cluster struct {
 InitNode *Node // init server node
 ExternalDatastore *ExternalDatastore `yaml:"externalDatastore,omitempty" json:"externalDatastore,omitempty"`
 KubeAPI *ExposureOpts `yaml:"kubeAPI" json:"kubeAPI,omitempty"`
-ServerLoadBalancer *Node `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"`
+ServerLoadBalancer *Loadbalancer `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"`
 ImageVolume string `yaml:"imageVolume" json:"imageVolume,omitempty"`
 }

View File

@@ -31,26 +31,65 @@ import (
 log "github.com/sirupsen/logrus"
 )
-type NodeFilterSuffix string
 const (
-NodeFilterSuffixNone NodeFilterSuffix = "none"
+NodeFilterSuffixNone = "nosuffix"
 NodeFilterMapKeyAll = "all"
 )
 // Regexp pattern to match node filters
 var NodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffix>:[[:alpha:]]+)?$`)
-// FilterNodes takes a string filter to return a filtered list of nodes
-func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, error) {
+// FilterNodesBySuffix properly interprets NodeFilters with suffix
+func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string][]*k3d.Node, error) {
+if len(nodefilters) == 0 || len(nodefilters[0]) == 0 {
+return nil, fmt.Errorf("No nodefilters specified")
+}
 result := map[string][]*k3d.Node{
 NodeFilterMapKeyAll: nodes,
 }
+for _, nf := range nodefilters {
+suffix := NodeFilterSuffixNone
+// match regex with capturing groups
+match := NodeFilterRegexp.FindStringSubmatch(nf)
+if len(match) == 0 {
+return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", nf)
+}
+// map capturing group names to submatches
+submatches := MapSubexpNames(NodeFilterRegexp.SubexpNames(), match)
+// get suffix
+if sf, ok := submatches["suffix"]; ok && sf != "" {
+suffix = sf
+}
+result[suffix] = make([]*k3d.Node, 0) // init map for this suffix
+filteredNodes, err := FilterNodes(nodes, []string{nf})
+if err != nil {
+return nil, err
+}
+log.Tracef("Adding %d nodes for suffix >%s< (filter: %s)", len(filteredNodes), suffix, nf)
+result[suffix] = filteredNodes
+}
+return result, nil
+}
+// FilterNodes takes a string filter to return a filtered list of nodes
+func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
+log.Tracef("Filtering %d nodes by %s", len(nodes), filters)
 if len(filters) == 0 || len(filters[0]) == 0 {
 log.Warnln("No node filter specified")
-return result, nil
+return nodes, nil
 }
 // map roles to subsets
@@ -58,7 +97,6 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e
 agentNodes := []*k3d.Node{}
 var serverlb *k3d.Node
 for _, node := range nodes {
-log.Tracef("FilterNodes (%+v): Checking node role %s", filters, node.Role)
 if node.Role == k3d.ServerRole {
 serverNodes = append(serverNodes, node)
 } else if node.Role == k3d.AgentRole {
@@ -89,7 +127,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e
 if len(filters) > 1 {
 log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
 }
-return result, nil
+return nodes, nil
 }
 // Choose the group of nodes to operate on
View File

@@ -41,11 +41,11 @@ options:
 extraArgs:
 - arg: --tls-san=127.0.0.1
 nodeFilters:
-- server[*]
+- server:*
 nodeLabels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer
 kubeconfig:
 updateDefaultKubeconfig: true
@@ -54,5 +54,5 @@ options:
 labels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer

View File

@@ -25,7 +25,7 @@ env:
 labels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer
 registries:
 create: true

View File

@@ -41,7 +41,7 @@ options:
 extraArgs:
 - arg: --tls-san=127.0.0.1
 nodeFilters:
-- server[*]
+- server:*
 kubeconfig:
 updateDefaultKubeconfig: true
 switchCurrentContext: true
@@ -49,5 +49,5 @@ options:
 labels:
 - label: foo=bar
 nodeFilters:
-- server[0]
+- server:0
 - loadbalancer

View File

@@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh"
 export CURRENT_STAGE="Test | basic"
 info "Creating two clusters..."
-$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server[0]' || failed "could not create cluster c1"
+$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server:0' || failed "could not create cluster c1"
 $EXE cluster create c2 --wait --timeout 60s || failed "could not create cluster c2"
 info "Checking that we can get both clusters..."

View File

@@ -21,7 +21,7 @@ clustername="cfgoverridetest"
 highlight "[START] Config With Override $EXTRA_TITLE"
 info "Creating cluster $clustername..."
-$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent[1]" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
+$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent:1" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
 info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
 sleep 5