fix usage of the new loadbalancer type and ordering when cluster is created
commit 6e8b27f99f (parent 31a1ac1d70)
@@ -112,7 +112,8 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster
     var cfg k3d.LoadbalancerConfig

-    if cluster.ServerLoadBalancer == nil {
+    if cluster.ServerLoadBalancer == nil || cluster.ServerLoadBalancer.Node == nil {
+        cluster.ServerLoadBalancer = &k3d.Loadbalancer{}
         for _, node := range cluster.Nodes {
             if node.Role == types.LoadBalancerRole {
                 var err error
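Note: with Loadbalancer.Config now a pointer (see the types change further down), a Cluster can plausibly arrive here carrying a ServerLoadBalancer struct whose Node field was never attached. Widening the guard to also fire on a nil Node lets the loop below re-discover the loadbalancer node from cluster.Nodes in that case as well.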
@@ -185,6 +186,12 @@ func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster
     }
     cluster.ServerLoadBalancer.Node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}

+    if cluster.ServerLoadBalancer.Config == nil {
+        cluster.ServerLoadBalancer.Config = &k3d.LoadbalancerConfig{
+            Ports: map[string][]string{},
+        }
+    }
+
     // Create LB as a modified node with loadbalancerRole
     lbNode := &k3d.Node{
         Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
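Note: the new block is the usual lazy-initialization guard for the now pointer-typed Config. A minimal standalone sketch of why both the nil pointer and the empty map matter (hypothetical types, not the k3d ones):

    package main

    import "fmt"

    type Config struct{ Ports map[string][]string }
    type Loadbalancer struct{ Config *Config }

    func main() {
        lb := &Loadbalancer{} // built without a config, e.g. from a bare struct literal
        if lb.Config == nil { // without this guard the assignment below would panic
            lb.Config = &Config{Ports: map[string][]string{}}
        }
        // indexing a missing key yields a nil slice, which append accepts
        lb.Config.Ports["6443.tcp"] = append(lb.Config.Ports["6443.tcp"], "k3d-test-server-0")
        fmt.Println(lb.Config.Ports) // map[6443.tcp:[k3d-test-server-0]]
    }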
@@ -61,6 +61,9 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime
     if simpleConfig.Network != "" {
         clusterNetwork.Name = simpleConfig.Network
+        clusterNetwork.External = true
     } else {
         clusterNetwork.Name = fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, simpleConfig.Name)
+        clusterNetwork.External = false
     }
+
     if simpleConfig.Subnet != "" {
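Note: the new External flag records whether the network was brought along by the user (simpleConfig.Network set) or created by k3d itself; presumably this is what lets later cleanup code leave user-provided networks untouched when the cluster is deleted.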
@@ -109,6 +112,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime
         if err != nil {
             return nil, fmt.Errorf("error preparing the loadbalancer: %w", err)
         }
+        newCluster.Nodes = append(newCluster.Nodes, newCluster.ServerLoadBalancer.Node)
     } else {
         log.Debugln("Disabling the load balancer")
     }
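Note: this is the ordering fix from the commit title. The prepared loadbalancer node is appended to newCluster.Nodes right away, so every later step that walks the node list (node filters, volumes, ports) sees the loadbalancer without special-casing it; the volume section below is simplified accordingly.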
@@ -133,6 +137,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime
         }

         newCluster.Nodes = append(newCluster.Nodes, &serverNode)
+
+        newCluster.ServerLoadBalancer.Config.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = append(newCluster.ServerLoadBalancer.Config.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)], serverNode.Name)
     }

     for i := 0; i < simpleConfig.Agents; i++ {
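Note: each server node now registers itself as a backend for the Kubernetes API port (keyed as "<DefaultAPIPort>.tcp") in the loadbalancer config at the moment it is created, rather than the port list being assembled later. This relies on ServerLoadBalancer.Config being non-nil, which NewLoadbalancer and the guard in LoadbalancerPrepare above presumably guarantee.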
@@ -150,12 +156,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime
     ****************************/

     // -> VOLUMES
-    nodeCount := simpleConfig.Servers + simpleConfig.Agents
+    nodeCount := len(newCluster.Nodes)
     nodeList := newCluster.Nodes
-    if !simpleConfig.Options.K3dOptions.DisableLoadbalancer {
-        nodeCount++
-        nodeList = append(nodeList, newCluster.ServerLoadBalancer.Node)
-    }
     for _, volumeWithNodeFilters := range simpleConfig.Volumes {
         nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters)
         if err != nil {
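Note: with the loadbalancer already present in newCluster.Nodes (see the append above), the volume section no longer needs to count servers and agents by hand or bolt the loadbalancer onto a copy of the node list: len(newCluster.Nodes) and the list itself are already complete.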
@@ -191,7 +193,9 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime
                 return nil, err
             }
             for _, pm := range portmappings {
-                loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes)
+                if err := loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes); err != nil {
+                    return nil, err
+                }
             }
         } else if suffix == "direct" {
             for _, node := range nodes {
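Note: loadbalancerAddPortConfigs returns an error that the old code silently discarded; the call is now checked and the error propagated to the caller.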
@@ -145,8 +145,7 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
     if len(node.Networks) > 0 {
         netInfo, err := GetNetwork(context.Background(), node.Networks[0]) // FIXME: only considering first network here, as that's the one k3d creates for a cluster
         if err != nil {
-            log.Warnln("Failed to get network information")
-            log.Warnln(err)
+            log.Warnf("Failed to get network information: %v", err)
         } else if netInfo.Driver == "host" {
             hostConfig.NetworkMode = "host"
         }
@@ -34,8 +34,8 @@ package types
 */

 type Loadbalancer struct {
-    Node   *Node              `mapstructure:",squash" yaml:",inline"` // the underlying node
-    Config LoadbalancerConfig `mapstructure:"config" yaml:"config"`   // its configuration
+    Node   *Node               `mapstructure:",squash" yaml:",inline"` // the underlying node
+    Config *LoadbalancerConfig `mapstructure:"config" yaml:"config"`   // its configuration
 }

 func NewLoadbalancer() *Loadbalancer {
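Note: switching Config from a value to a pointer makes "no config given" observable: with a value field, an omitted config key deserializes into an indistinguishable zero struct, while a pointer stays nil. A minimal sketch of that distinction, using encoding/json for brevity instead of the mapstructure/yaml tags the real type uses:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Config struct {
        Ports map[string][]string `json:"ports"`
    }

    type Loadbalancer struct {
        Config *Config `json:"config"` // pointer: an absent key stays nil
    }

    func main() {
        var lb Loadbalancer
        _ = json.Unmarshal([]byte(`{}`), &lb)
        fmt.Println(lb.Config == nil) // true -> caller can tell no config was provided
    }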
@@ -44,7 +44,12 @@ func NewLoadbalancer() *Loadbalancer {
             Role:  LoadBalancerRole,
             Image: GetLoadbalancerImage(),
         },
-        Config: LoadbalancerConfig{Ports: map[string][]string{}},
+        Config: &LoadbalancerConfig{
+            Ports: map[string][]string{},
+            Settings: LoadBalancerSettings{
+                WorkerProcesses: DefaultLoadbalancerWorkerProcesses,
+            },
+        },
     }
 }
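Note: the constructor keeps pace with the type change: it now allocates the Config pointer and seeds Settings.WorkerProcesses from DefaultLoadbalancerWorkerProcesses, so a freshly constructed loadbalancer carries sane settings before any ports are added.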
@@ -37,7 +37,7 @@ const (
 )

 // Regexp pattern to match node filters
-var NodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffix>:[[:alpha:]]+)?$`)
+var NodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffixSpec>:(?P<suffix>[[:alpha:]]+))?$`)

 // FilterNodesBySuffix properly interprets NodeFilters with suffix
 func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string][]*k3d.Node, error) {
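Note: the regexp change wraps the trailing ":suffix" part in an outer suffixSpec group, so the named suffix group now captures the bare word without its leading colon. A standalone check (hypothetical filter string, pattern copied from the new version in the diff):

    package main

    import (
        "fmt"
        "regexp"
    )

    var nodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffixSpec>:(?P<suffix>[[:alpha:]]+))?$`)

    func main() {
        match := nodeFilterRegexp.FindStringSubmatch("agent:1:direct")
        for i, name := range nodeFilterRegexp.SubexpNames() {
            if name == "suffix" {
                fmt.Printf("suffix = %q\n", match[i]) // "direct"; the old pattern captured ":direct"
            }
        }
    }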