changes when creating clusters + new nodefilter syntax
- generate node names when transforming from simple to cluster config
- ClusterCreate(clusterconfig) should have a ready made config and not generate variables
- ClusterCreate() only prep LB if not already present (to be removed)
- cluster struct: serverloadbalancer is now of type LoadBalancer (Node + Config)
- use new nodefilter syntax with 'id:index:suffix' instead of 'id[index]' everywhere
- use suffix when creating the LB
parent 9574002b38
commit d15ed26875
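Illustration (not part of the commit diff): a minimal, self-contained Go sketch of how the new 'group:subset:suffix' node-filter shape parses. The pattern is copied verbatim from the NodeFilterRegexp change further down in this diff; the filter strings and the main() wrapper are invented for demonstration.

package main

import (
	"fmt"
	"regexp"
)

// Pattern copied from the NodeFilterRegexp introduced in this commit (see the node-filter hunk below).
var nodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffix>:[[:alpha:]]+)?$`)

func main() {
	// New colon syntax matches; the old bracket syntax does not.
	for _, nf := range []string{"server:*", "server:0", "agent:1", "loadbalancer", "server:0:direct", "server[0]"} {
		fmt.Printf("%-16s matches: %v\n", nf, nodeFilterRegexp.MatchString(nf))
	}
}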
@@ -372,12 +372,12 @@ ClusterCreatOpts:
clusterCreateOpts.GlobalLabels[k3d.LabelClusterName] = cluster.Name
// agent defaults (per cluster)
// connection url is always the name of the first server node (index 0)
connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
// connection url is always the name of the first server node (index 0) // TODO: change this to the server loadbalancer
connectionURL := fmt.Sprintf("https://%s:%s", GenerateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL
clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token))
nodeSetup := func(node *k3d.Node, suffix int) error {
nodeSetup := func(node *k3d.Node) error {
// cluster specific settings
if node.RuntimeLabels == nil {
node.RuntimeLabels = make(map[string]string) // TODO: maybe create an init function?

@@ -417,7 +417,6 @@ ClusterCreatOpts:
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
}
node.Name = generateNodeName(cluster.Name, node.Role, suffix)
node.Networks = []string{cluster.Network.Name}
node.Restart = true
node.GPURequest = clusterCreateOpts.GPURequest

@@ -437,8 +436,6 @@ ClusterCreatOpts:
// used for node suffices
serverCount := 0
agentCount := 0
suffix := 0
// create init node first
if cluster.InitNode != nil {

@@ -457,7 +454,7 @@ ClusterCreatOpts:
cluster.InitNode.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
}
if err := nodeSetup(cluster.InitNode, serverCount); err != nil {
if err := nodeSetup(cluster.InitNode); err != nil {
return err
}
serverCount++

@@ -481,17 +478,11 @@ ClusterCreatOpts:
time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering
// name suffix
suffix = serverCount
serverCount++
} else if node.Role == k3d.AgentRole {
// name suffix
suffix = agentCount
agentCount++
}
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
if err := nodeSetup(node, suffix); err != nil {
if err := nodeSetup(node); err != nil {
return err
}
}

@@ -499,7 +490,7 @@ ClusterCreatOpts:
// WARN, if there are exactly two server nodes: that means we're using etcd, but don't have fault tolerance
if serverCount == 2 {
log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve quorum & fault tolerance")
log.Warnln("You're creating 2 server nodes: Please consider creating at least 3 to achieve etcd quorum & fault tolerance")
}
/*

@@ -507,19 +498,26 @@ ClusterCreatOpts:
*/
// *** ServerLoadBalancer ***
if !clusterCreateOpts.DisableLoadBalancer {
lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels})
if err != nil {
return err
if cluster.ServerLoadBalancer == nil {
lbNode, err := LoadbalancerPrepare(ctx, runtime, cluster, &k3d.LoadbalancerCreateOpts{Labels: clusterCreateOpts.GlobalLabels})
if err != nil {
return err
}
cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
}
cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
lbConfig, err := LoadbalancerGenerateConfig(cluster)
if err != nil {
return fmt.Errorf("error generating loadbalancer config: %v", err)
if len(cluster.ServerLoadBalancer.Config.Ports) == 0 {
lbConfig, err := LoadbalancerGenerateConfig(cluster)
if err != nil {
return fmt.Errorf("error generating loadbalancer config: %v", err)
}
cluster.ServerLoadBalancer.Config = lbConfig
}
cluster.ServerLoadBalancer.Node.RuntimeLabels = clusterCreateOpts.GlobalLabels
// prepare to write config to lb container
configyaml, err := yaml.Marshal(lbConfig)
configyaml, err := yaml.Marshal(cluster.ServerLoadBalancer.Config)
if err != nil {
return err
}

@@ -534,13 +532,13 @@ ClusterCreatOpts:
},
}
lbNode.HookActions = append(lbNode.HookActions, writeLbConfigAction)
cluster.ServerLoadBalancer.Node.HookActions = append(cluster.ServerLoadBalancer.Node.HookActions, writeLbConfigAction)
log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
log.Infof("Creating LoadBalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
if err := NodeCreate(ctx, runtime, cluster.ServerLoadBalancer.Node, k3d.NodeCreateOpts{}); err != nil {
return fmt.Errorf("error creating loadbalancer: %v", err)
}
log.Debugf("Created loadbalancer '%s'", lbNode.Name)
log.Debugf("Created loadbalancer '%s'", cluster.ServerLoadBalancer.Node.Name)
return err
}

@@ -795,7 +793,7 @@ func GenerateClusterToken() string {
return util.GenerateRandomString(20)
}
func generateNodeName(cluster string, role k3d.Role, suffix int) string {
func GenerateNodeName(cluster string, role k3d.Role, suffix int) string {
return fmt.Sprintf("%s-%s-%s-%d", k3d.DefaultObjectNamePrefix, cluster, role, suffix)
}
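For reference, a sketch of the naming scheme produced by the now-exported GenerateNodeName (not part of this commit; it assumes k3d.DefaultObjectNamePrefix is "k3d", as in upstream k3d):

package main

import "fmt"

// generateNodeName mirrors the format string used by GenerateNodeName in the hunk above;
// the literal "k3d" stands in for k3d.DefaultObjectNamePrefix (assumption).
func generateNodeName(cluster, role string, suffix int) string {
	return fmt.Sprintf("%s-%s-%s-%d", "k3d", cluster, role, suffix)
}

func main() {
	fmt.Println(generateNodeName("mycluster", "server", 0)) // k3d-mycluster-server-0 (also the host of the connection URL)
	fmt.Println(generateNodeName("mycluster", "agent", 1))  // k3d-mycluster-agent-1
}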
@@ -77,18 +77,18 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
}
log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
startTime := time.Now().Truncate(time.Second).UTC()
if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil {
if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer.Node); err != nil {
return fmt.Errorf("error writing new loadbalancer config to container: %w", err)
}
successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer successCtxCancel()
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer failureCtxCancel()
err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer, "host not found in upstream", startTime)
err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer.Node, "host not found in upstream", startTime)
if err != nil {
log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
return LBConfigErrFailedTest

@@ -101,7 +101,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
return LBConfigErrFailedTest
}
}
log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Name)
log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Node.Name)
time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits

@@ -116,7 +116,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
for _, node := range cluster.Nodes {
if node.Role == types.LoadBalancerRole {
var err error
cluster.ServerLoadBalancer, err = NodeGet(ctx, runtime, node)
cluster.ServerLoadBalancer.Node, err = NodeGet(ctx, runtime, node)
if err != nil {
return cfg, err
}

@@ -124,7 +124,7 @@ func GetLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluste
}
}
reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer)
reader, err := runtime.ReadFromNode(ctx, types.DefaultLoadbalancerConfigPath, cluster.ServerLoadBalancer.Node)
if err != nil {
return cfg, err
}

@@ -162,31 +162,36 @@ func LoadbalancerGenerateConfig(cluster *k3d.Cluster) (k3d.LoadbalancerConfig, e
lbConfig.Ports[fmt.Sprintf("%s.tcp", k3d.DefaultAPIPort)] = servers
// generate comma-separated list of extra ports to forward // TODO: no default targets?
for exposedPort := range cluster.ServerLoadBalancer.Ports {
for exposedPort := range cluster.ServerLoadBalancer.Node.Ports {
// TODO: catch duplicates here?
lbConfig.Ports[fmt.Sprintf("%s.%s", exposedPort.Port(), exposedPort.Proto())] = servers
}
// some additional nginx settings
lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Ports)*len(servers)
lbConfig.Settings.WorkerProcesses = k3d.DefaultLoadbalancerWorkerProcesses + len(cluster.ServerLoadBalancer.Node.Ports)*len(servers)
return lbConfig, nil
}
func LoadbalancerPrepare(ctx context.Context, runtime runtimes.Runtime, cluster *types.Cluster, opts *k3d.LoadbalancerCreateOpts) (*k3d.Node, error) {
labels := map[string]string{}
if cluster.ServerLoadBalancer.Ports == nil {
cluster.ServerLoadBalancer.Ports = nat.PortMap{}
if opts != nil && opts.Labels == nil && len(opts.Labels) == 0 {
labels = opts.Labels
}
cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
if cluster.ServerLoadBalancer.Node.Ports == nil {
cluster.ServerLoadBalancer.Node.Ports = nat.PortMap{}
}
cluster.ServerLoadBalancer.Node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
// Create LB as a modified node with loadbalancerRole
lbNode := &k3d.Node{
Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: k3d.GetLoadbalancerImage(),
Ports: cluster.ServerLoadBalancer.Ports,
Ports: cluster.ServerLoadBalancer.Node.Ports,
Role: k3d.LoadBalancerRole,
RuntimeLabels: opts.Labels, // TODO: createLoadBalancer: add more expressive labels
RuntimeLabels: labels, // TODO: createLoadBalancer: add more expressive labels
Networks: []string{cluster.Network.Name},
Restart: true,
}
@@ -684,7 +684,7 @@ func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, chang
if err != nil {
return fmt.Errorf("error updating loadbalancer config: %w", err)
}
cluster.ServerLoadBalancer = result
cluster.ServerLoadBalancer.Node = result
lbConfig, err := LoadbalancerGenerateConfig(cluster)
if err != nil {
return fmt.Errorf("error generating loadbalancer config: %v", err)
@@ -81,13 +81,13 @@ func TestReadSimpleConfig(t *testing.T) {
ExtraArgs: []conf.K3sArgWithNodeFilters{
{
Arg: "--tls-san=127.0.0.1",
NodeFilters: []string{"server[*]"},
NodeFilters: []string{"server:*"},
},
},
NodeLabels: []conf.LabelWithNodeFilters{
{
Label: "foo=bar",
NodeFilters: []string{"server[0]", "loadbalancer"},
NodeFilters: []string{"server:0", "loadbalancer"},
},
},
},

@@ -99,7 +99,7 @@ func TestReadSimpleConfig(t *testing.T) {
Labels: []conf.LabelWithNodeFilters{
{
Label: "foo=bar",
NodeFilters: []string{"server[0]", "loadbalancer"},
NodeFilters: []string{"server:0", "loadbalancer"},
},
},
},
@@ -33,11 +33,11 @@ options:
extraArgs:
- arg: --tls-san=127.0.0.1
nodeFilters:
- "server[*]"
- server:*
nodeLabels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
kubeconfig:
updateDefaultKubeconfig: true

@@ -46,5 +46,5 @@ options:
labels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
@@ -33,11 +33,11 @@ options:
extraArgs:
- arg: --tls-san=127.0.0.1
nodeFilters:
- "server[*]"
- "server:*"
nodeLabels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
kubeconfig:
updateDefaultKubeconfig: true

@@ -46,5 +46,5 @@ options:
labels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
@@ -31,6 +31,7 @@ import (
"github.com/docker/go-connections/nat"
cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg
"github.com/rancher/k3d/v4/pkg/client"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha3"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"

@@ -102,8 +103,11 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
newCluster.Nodes = []*k3d.Node{}
if !simpleConfig.Options.K3dOptions.DisableLoadbalancer {
newCluster.ServerLoadBalancer = &k3d.Node{
Role: k3d.LoadBalancerRole,
newCluster.ServerLoadBalancer = k3d.NewLoadbalancer()
var err error
newCluster.ServerLoadBalancer.Node, err = client.LoadbalancerPrepare(ctx, runtime, &newCluster, nil)
if err != nil {
return nil, fmt.Errorf("error preparing the loadbalancer: %w", err)
}
} else {
log.Debugln("Disabling the load balancer")

@@ -115,6 +119,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
for i := 0; i < simpleConfig.Servers; i++ {
serverNode := k3d.Node{
Name: client.GenerateNodeName(newCluster.Name, k3d.ServerRole, i),
Role: k3d.ServerRole,
Image: simpleConfig.Image,
ServerOpts: k3d.ServerOpts{},

@@ -132,6 +137,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
for i := 0; i < simpleConfig.Agents; i++ {
agentNode := k3d.Node{
Name: client.GenerateNodeName(newCluster.Name, k3d.AgentRole, i),
Role: k3d.AgentRole,
Image: simpleConfig.Image,
Memory: simpleConfig.Options.Runtime.AgentsMemory,

@@ -148,7 +154,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
nodeList := newCluster.Nodes
if !simpleConfig.Options.K3dOptions.DisableLoadbalancer {
nodeCount++
nodeList = append(nodeList, newCluster.ServerLoadBalancer)
nodeList = append(nodeList, newCluster.ServerLoadBalancer.Node)
}
for _, volumeWithNodeFilters := range simpleConfig.Volumes {
nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters)
@@ -167,27 +173,35 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port)
}
nodes, err := util.FilterNodes(nodeList, portWithNodeFilters.NodeFilters)
x, err := util.FilterNodesWithSuffix(nodeList, portWithNodeFilters.NodeFilters)
if err != nil {
return nil, err
}
for _, node := range nodes {
for suffix, nodes := range x {
portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port)
if err != nil {
return nil, fmt.Errorf("Failed to parse port spec '%s': %+v", portWithNodeFilters.Port, err)
return nil, fmt.Errorf("error parsing port spec '%s': %+v", portWithNodeFilters.Port, err)
}
if node.Ports == nil {
node.Ports = nat.PortMap{}
}
for _, pm := range portmappings {
if _, exists := node.Ports[pm.Port]; exists {
node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding)
} else {
node.Ports[pm.Port] = []nat.PortBinding{pm.Binding}
if suffix == "proxy" || suffix == util.NodeFilterSuffixNone { // proxy is the default suffix for port mappings
if newCluster.ServerLoadBalancer == nil {
return nil, fmt.Errorf("port-mapping of type 'proxy' specified, but loadbalancer is disabled")
}
if err := addPortMappings(newCluster.ServerLoadBalancer.Node, portmappings); err != nil {
return nil, err
}
for _, pm := range portmappings {
loadbalancerAddPortConfigs(newCluster.ServerLoadBalancer, pm, nodes)
}
} else if suffix == "direct" {
for _, node := range nodes {
if err := addPortMappings(node, portmappings); err != nil {
return nil, err
}
}
}
}
}
// -> K3S NODE LABELS

@@ -358,3 +372,44 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
return clusterConfig, nil
}
func addPortMappings(node *k3d.Node, portmappings []nat.PortMapping) error {
if node.Ports == nil {
node.Ports = nat.PortMap{}
}
for _, pm := range portmappings {
if _, exists := node.Ports[pm.Port]; exists {
node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding)
} else {
node.Ports[pm.Port] = []nat.PortBinding{pm.Binding}
}
}
return nil
}
func loadbalancerAddPortConfigs(loadbalancer *k3d.Loadbalancer, pm nat.PortMapping, nodes []*k3d.Node) error {
portconfig := fmt.Sprintf("%s.%s", pm.Port.Port(), pm.Port.Proto())
nodenames := []string{}
for _, node := range nodes {
nodenames = append(nodenames, node.Name)
}
// entry for that port doesn't exist yet, so we simply create it with the list of node names
if _, ok := loadbalancer.Config.Ports[portconfig]; !ok {
loadbalancer.Config.Ports[portconfig] = nodenames
return nil
}
nodenameLoop:
for _, nodename := range nodenames {
for _, existingNames := range loadbalancer.Config.Ports[portconfig] {
if nodename == existingNames {
continue nodenameLoop
}
loadbalancer.Config.Ports[portconfig] = append(loadbalancer.Config.Ports[portconfig], nodename)
}
}
return nil
}
@@ -34,16 +34,17 @@ package types
*/
type Loadbalancer struct {
Node Node `mapstructure:",squash" yaml:",inline"` // the underlying node
Node *Node `mapstructure:",squash" yaml:",inline"` // the underlying node
Config LoadbalancerConfig `mapstructure:"config" yaml:"config"` // its configuration
}
func NewLoadbalancer() *Loadbalancer {
return &Loadbalancer{
Node: Node{
Node: &Node{
Role: LoadBalancerRole,
Image: GetLoadbalancerImage(),
},
Config: LoadbalancerConfig{Ports: map[string][]string{}},
}
}

@@ -262,7 +262,7 @@ type Cluster struct {
InitNode *Node // init server node
ExternalDatastore *ExternalDatastore `yaml:"externalDatastore,omitempty" json:"externalDatastore,omitempty"`
KubeAPI *ExposureOpts `yaml:"kubeAPI" json:"kubeAPI,omitempty"`
ServerLoadBalancer *Node `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"`
ServerLoadBalancer *Loadbalancer `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"`
ImageVolume string `yaml:"imageVolume" json:"imageVolume,omitempty"`
}
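A minimal usage sketch of the reshaped ServerLoadBalancer (illustrative only: the constructor and the Node/Config fields come from the hunk above, while the node name and the "6443.tcp" port key are assumptions):

package main

import (
	"fmt"

	k3d "github.com/rancher/k3d/v4/pkg/types"
)

func main() {
	// NewLoadbalancer pre-fills the underlying Node (role, image) and an empty Config.
	lb := k3d.NewLoadbalancer()
	lb.Node.Name = "k3d-mycluster-serverlb"                          // assumed name, for illustration only
	lb.Config.Ports["6443.tcp"] = []string{"k3d-mycluster-server-0"} // "<port>.<proto>" -> upstream node names
	fmt.Printf("lb node %q proxies %v\n", lb.Node.Name, lb.Config.Ports)
}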
@@ -31,26 +31,65 @@ import (
log "github.com/sirupsen/logrus"
)
type NodeFilterSuffix string
const (
NodeFilterSuffixNone NodeFilterSuffix = "none"
NodeFilterMapKeyAll = "all"
NodeFilterSuffixNone = "nosuffix"
NodeFilterMapKeyAll = "all"
)
// Regexp pattern to match node filters
var NodeFilterRegexp = regexp.MustCompile(`^(?P<group>server|servers|agent|agents|loadbalancer|all)(?P<subsetSpec>:(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*-\d*)|(?P<subsetWildcard>\*)))?(?P<suffix>:[[:alpha:]]+)?$`)
// FilterNodes takes a string filter to return a filtered list of nodes
func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, error) {
// FilterNodesBySuffix properly interprets NodeFilters with suffix
func FilterNodesWithSuffix(nodes []*k3d.Node, nodefilters []string) (map[string][]*k3d.Node, error) {
if len(nodefilters) == 0 || len(nodefilters[0]) == 0 {
return nil, fmt.Errorf("No nodefilters specified")
}
result := map[string][]*k3d.Node{
NodeFilterMapKeyAll: nodes,
}
for _, nf := range nodefilters {
suffix := NodeFilterSuffixNone
// match regex with capturing groups
match := NodeFilterRegexp.FindStringSubmatch(nf)
if len(match) == 0 {
return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", nf)
}
// map capturing group names to submatches
submatches := MapSubexpNames(NodeFilterRegexp.SubexpNames(), match)
// get suffix
if sf, ok := submatches["suffix"]; ok && sf != "" {
suffix = sf
}
result[suffix] = make([]*k3d.Node, 0) // init map for this suffix
filteredNodes, err := FilterNodes(nodes, []string{nf})
if err != nil {
return nil, err
}
log.Tracef("Adding %d nodes for suffix >%s< (filter: %s)", len(filteredNodes), suffix, nf)
result[suffix] = filteredNodes
}
return result, nil
}
// FilterNodes takes a string filter to return a filtered list of nodes
func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
log.Tracef("Filtering %d nodes by %s", len(nodes), filters)
if len(filters) == 0 || len(filters[0]) == 0 {
log.Warnln("No node filter specified")
return result, nil
return nodes, nil
}
// map roles to subsets

@@ -58,7 +97,6 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e
agentNodes := []*k3d.Node{}
var serverlb *k3d.Node
for _, node := range nodes {
log.Tracef("FilterNodes (%+v): Checking node role %s", filters, node.Role)
if node.Role == k3d.ServerRole {
serverNodes = append(serverNodes, node)
} else if node.Role == k3d.AgentRole {

@@ -89,7 +127,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) (map[string][]*k3d.Node, e
if len(filters) > 1 {
log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
}
return result, nil
return nodes, nil
}
// Choose the group of nodes to operate on
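A rough usage sketch of FilterNodesWithSuffix (assumptions: the util package path github.com/rancher/k3d/v4/pkg/util, plus an invented node list and filters). Plain filters land in the 'nosuffix' bucket, suffixed filters get their own bucket, and the 'all' key always holds every node:

package main

import (
	"fmt"

	k3d "github.com/rancher/k3d/v4/pkg/types"
	"github.com/rancher/k3d/v4/pkg/util"
)

func main() {
	nodes := []*k3d.Node{
		{Name: "k3d-demo-server-0", Role: k3d.ServerRole},
		{Name: "k3d-demo-agent-0", Role: k3d.AgentRole},
	}
	// Group nodes per filter suffix; the transform code earlier in this diff then
	// treats the default/"proxy" group and the "direct" group differently for port mappings.
	grouped, err := util.FilterNodesWithSuffix(nodes, []string{"agent:*", "server:0:direct"})
	if err != nil {
		panic(err)
	}
	for suffix, group := range grouped {
		fmt.Printf("%s -> %d node(s)\n", suffix, len(group))
	}
}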
@@ -41,11 +41,11 @@ options:
extraArgs:
- arg: --tls-san=127.0.0.1
nodeFilters:
- server[*]
- server:*
nodeLabels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
kubeconfig:
updateDefaultKubeconfig: true

@@ -54,5 +54,5 @@ options:
labels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
@@ -25,7 +25,7 @@ env:
labels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
registries:
create: true

@@ -41,7 +41,7 @@ options:
extraArgs:
- arg: --tls-san=127.0.0.1
nodeFilters:
- server[*]
- server:*
kubeconfig:
updateDefaultKubeconfig: true
switchCurrentContext: true

@@ -49,5 +49,5 @@ options:
labels:
- label: foo=bar
nodeFilters:
- server[0]
- server:0
- loadbalancer
@@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh"
export CURRENT_STAGE="Test | basic"
info "Creating two clusters..."
$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server[0]' || failed "could not create cluster c1"
$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server:0' || failed "could not create cluster c1"
$EXE cluster create c2 --wait --timeout 60s || failed "could not create cluster c2"
info "Checking that we can get both clusters..."

@@ -21,7 +21,7 @@ clustername="cfgoverridetest"
highlight "[START] Config With Override $EXTRA_TITLE"
info "Creating cluster $clustername..."
$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent[1]" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent:1" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5