[ENHANCEMENT] Add config processing and fix hostmode (#477, @konradmalik)
parent 601bef45d7
commit df9859eae6
@@ -134,7 +134,7 @@ func NewCmdClusterCreate() *cobra.Command {
 log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", cfg)

 /**************************************
- * Transform & Validate Configuration *
+ * Transform, Process & Validate Configuration *
  **************************************/

 // Set the name
@@ -146,7 +146,14 @@ func NewCmdClusterCreate() *cobra.Command {
 if err != nil {
 	log.Fatalln(err)
 }
-log.Debugf("===== Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
+log.Debugf("===== Merged Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
+
+clusterConfig, err = config.ProcessClusterConfig(*clusterConfig)
+if err != nil {
+	log.Fatalln(err)
+}
+log.Debugf("===== Processed Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
+
 if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
 	log.Fatalln("Failed Cluster Configuration Validation: ", err)
 }
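Taken together, the create command now runs the config through four stages: merge the simple config, transform it into a full cluster config, process it (the new step), and validate it. A condensed sketch of that pipeline, using only functions visible in this diff and its test; the surrounding cobra handler and the cfgViper variable are assumptions, not shown in this commit:

// Hypothetical condensed view of the config pipeline in NewCmdClusterCreate.
cfg, err := config.FromViperSimple(cfgViper) // read the merged simple config
if err != nil {
	log.Fatalln(err)
}
clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, cfg)
if err != nil {
	log.Fatalln(err)
}
clusterConfig, err = config.ProcessClusterConfig(*clusterConfig) // new step: sanitize, e.g. for host network mode
if err != nil {
	log.Fatalln(err)
}
if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
	log.Fatalln("Failed Cluster Configuration Validation: ", err)
}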
@@ -93,11 +93,7 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *conf

 // add /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
 if !clusterConfig.ClusterCreateOpts.PrepDisableHostIPInjection {
-	if clusterConfig.Cluster.Network.Name != "host" {
-		prepInjectHostIP(ctx, runtime, &clusterConfig.Cluster)
-	} else {
-		log.Infoln("Hostnetwork selected -> Skipping injection of docker host into the cluster")
-	}
+	prepInjectHostIP(ctx, runtime, &clusterConfig.Cluster)
 }

 // create the registry hosting configmap
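With that decision folded into config processing, the runtime path is branch-free: prepInjectHostIP is now gated solely by the precomputed flag. Restated with explanatory comments (same identifiers as the diff above):

// After ProcessClusterConfig has run, ClusterRun no longer needs to know which
// network was selected: for the host network, PrepDisableHostIPInjection was
// already set to true, so the injection is skipped without a special case here.
if !clusterConfig.ClusterCreateOpts.PrepDisableHostIPInjection {
	prepInjectHostIP(ctx, runtime, &clusterConfig.Cluster)
}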
@@ -492,55 +488,51 @@ ClusterCreatOpts:
 */
 // *** ServerLoadBalancer ***
 if !clusterCreateOpts.DisableLoadBalancer {
-	if cluster.Network.Name != "host" { // serverlb not supported in hostnetwork mode due to port collisions with server node
-		// Generate a comma-separated list of server/server names to pass to the LB container
-		servers := ""
-		for _, node := range cluster.Nodes {
-			if node.Role == k3d.ServerRole {
-				if servers == "" {
-					servers = node.Name
-				} else {
-					servers = fmt.Sprintf("%s,%s", servers, node.Name)
-				}
-			}
-		}
-
-		// generate comma-separated list of extra ports to forward
-		ports := k3d.DefaultAPIPort
-		for exposedPort := range cluster.ServerLoadBalancer.Ports {
-			ports += "," + exposedPort.Port()
-		}
-
-		if cluster.ServerLoadBalancer.Ports == nil {
-			cluster.ServerLoadBalancer.Ports = nat.PortMap{}
-		}
-		cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
-
-		// Create LB as a modified node with loadbalancerRole
-		lbNode := &k3d.Node{
-			Name:     fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
-			Image:    fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()),
-			Ports:    cluster.ServerLoadBalancer.Ports,
-			Env: []string{
-				fmt.Sprintf("SERVERS=%s", servers),
-				fmt.Sprintf("PORTS=%s", ports),
-				fmt.Sprintf("WORKER_PROCESSES=%d", len(strings.Split(ports, ","))),
-			},
-			Role:     k3d.LoadBalancerRole,
-			Labels:   clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels
-			Networks: []string{cluster.Network.Name},
-			Restart:  true,
-		}
-		cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
-		log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
-		if err := NodeCreate(clusterCreateCtx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
-			log.Errorln("Failed to create loadbalancer")
-			return err
-		}
-		log.Debugf("Created loadbalancer '%s'", lbNode.Name)
-	} else {
-		log.Infoln("Hostnetwork selected -> Skipping creation of server LoadBalancer")
-	}
+	// Generate a comma-separated list of server/server names to pass to the LB container
+	servers := ""
+	for _, node := range cluster.Nodes {
+		if node.Role == k3d.ServerRole {
+			if servers == "" {
+				servers = node.Name
+			} else {
+				servers = fmt.Sprintf("%s,%s", servers, node.Name)
+			}
+		}
+	}
+
+	// generate comma-separated list of extra ports to forward
+	ports := k3d.DefaultAPIPort
+	for exposedPort := range cluster.ServerLoadBalancer.Ports {
+		ports += "," + exposedPort.Port()
+	}
+
+	if cluster.ServerLoadBalancer.Ports == nil {
+		cluster.ServerLoadBalancer.Ports = nat.PortMap{}
+	}
+	cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
+
+	// Create LB as a modified node with loadbalancerRole
+	lbNode := &k3d.Node{
+		Name:     fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
+		Image:    fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()),
+		Ports:    cluster.ServerLoadBalancer.Ports,
+		Env: []string{
+			fmt.Sprintf("SERVERS=%s", servers),
+			fmt.Sprintf("PORTS=%s", ports),
+			fmt.Sprintf("WORKER_PROCESSES=%d", len(strings.Split(ports, ","))),
+		},
+		Role:     k3d.LoadBalancerRole,
+		Labels:   clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels
+		Networks: []string{cluster.Network.Name},
+		Restart:  true,
+	}
+	cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
+	log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
+	if err := NodeCreate(clusterCreateCtx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
+		log.Errorln("Failed to create loadbalancer")
+		return err
+	}
+	log.Debugf("Created loadbalancer '%s'", lbNode.Name)
 }

 return nil
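The moved block only computes the environment the loadbalancer container is started with; the change is de-indentation, since the host-network guard now lives in config processing. A self-contained sketch of just that computation, with hypothetical node names and one assumed extra port (plain strings stand in for the k3d types):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical server node names; k3d derives the real ones from the cluster name.
	serverNames := []string{"k3d-mycluster-server-0", "k3d-mycluster-server-1"}
	servers := strings.Join(serverNames, ",")

	// The API port is always forwarded; "8080" stands in for an extra exposed port.
	ports := "6443"
	for _, extra := range []string{"8080"} {
		ports += "," + extra
	}

	fmt.Printf("SERVERS=%s\n", servers)                                 // SERVERS=k3d-mycluster-server-0,k3d-mycluster-server-1
	fmt.Printf("PORTS=%s\n", ports)                                     // PORTS=6443,8080
	fmt.Printf("WORKER_PROCESSES=%d\n", len(strings.Split(ports, ","))) // WORKER_PROCESSES=2
}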
pkg/config/process.go (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

package config

import (
	conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
	log "github.com/sirupsen/logrus"
)

// ProcessClusterConfig applies processing to the config, sanitizing it and
// doing some final modifications
func ProcessClusterConfig(clusterConfig conf.ClusterConfig) (*conf.ClusterConfig, error) {
	cluster := clusterConfig.Cluster
	if cluster.Network.Name == "host" {
		log.Infoln("Hostnetwork selected - disabling injection of docker host into the cluster, server load balancer and setting the api port to the k3s default")
		// if network is set to host, exposed api port must be the one imposed by k3s
		k3sPort := cluster.KubeAPI.Port.Port()
		log.Debugf("Host network was chosen, changing provided/random api port to k3s:%s", k3sPort)
		cluster.KubeAPI.PortMapping.Binding.HostPort = k3sPort

		// if network is host, don't inject docker host into the cluster
		clusterConfig.ClusterCreateOpts.PrepDisableHostIPInjection = true

		// if network is host, disable load balancer
		// serverlb not supported in hostnetwork mode due to port collisions with server node
		clusterConfig.ClusterCreateOpts.DisableLoadBalancer = true
	}

	return &clusterConfig, nil
}
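The net effect for a config that selects the host network, sketched with hypothetical values (the host port is pinned to whatever cluster.KubeAPI.Port holds, which is the k3s API port, 6443 by default):

// Hypothetical usage; field values are illustrative, not from a real run.
cfg.Cluster.Network.Name = "host"
processed, err := config.ProcessClusterConfig(cfg)
if err != nil {
	log.Fatalln(err)
}
// processed.Cluster.KubeAPI.PortMapping.Binding.HostPort -> "6443" (k3s default)
// processed.ClusterCreateOpts.PrepDisableHostIPInjection -> true
// processed.ClusterCreateOpts.DisableLoadBalancer        -> true

The test file below exercises exactly these outcomes for both network modes.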
pkg/config/process_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
/*
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

package config

import (
	"context"
	"testing"

	"github.com/rancher/k3d/v4/pkg/runtimes"
	"github.com/spf13/viper"
	"gotest.tools/assert"
)

func TestProcessClusterConfig(t *testing.T) {
	cfgFile := "./test_assets/config_test_simple.yaml"

	vip := viper.New()
	vip.SetConfigFile(cfgFile)
	_ = vip.ReadInConfig()

	cfg, err := FromViperSimple(vip)
	if err != nil {
		t.Error(err)
	}

	t.Logf("\n========== Read Config and transform to cluster ==========\n%+v\n=================================\n", cfg)

	clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg)
	if err != nil {
		t.Error(err)
	}

	t.Logf("\n========== Process Cluster Config (non-host network) ==========\n%+v\n=================================\n", cfg)

	clusterCfg, err = ProcessClusterConfig(*clusterCfg)
	assert.Assert(t, clusterCfg.ClusterCreateOpts.DisableLoadBalancer == false, "The load balancer should be enabled")
	assert.Assert(t, clusterCfg.ClusterCreateOpts.PrepDisableHostIPInjection == false, "The host ip injection should be enabled")

	t.Logf("\n===== Resulting Cluster Config (non-host network) =====\n%+v\n===============\n", clusterCfg)

	t.Logf("\n========== Process Cluster Config (host network) ==========\n%+v\n=================================\n", cfg)

	clusterCfg.Cluster.Network.Name = "host"
	clusterCfg, err = ProcessClusterConfig(*clusterCfg)
	assert.Assert(t, clusterCfg.ClusterCreateOpts.DisableLoadBalancer == true, "The load balancer should be disabled")
	assert.Assert(t, clusterCfg.ClusterCreateOpts.PrepDisableHostIPInjection == true, "The host ip injection should be disabled")

	t.Logf("\n===== Resulting Cluster Config (host network) =====\n%+v\n===============\n", clusterCfg)
}