Merge pull request #218 from rancher/feature/master-loadbalancer

[v3/Feature] Add cluster-loadbalancer
Thorsten Klein, 2020-04-15 07:34:39 +02:00 (committed via GitHub)
commit d8eb206e44
10 changed files with 138 additions and 121 deletions


@ -72,7 +72,7 @@ func NewCmdCreateCluster() *cobra.Command {
 /*********
  * Flags *
  *********/
- cmd.Flags().StringArrayP("api-port", "a", []string{"6443"}, "Specify the Kubernetes API server port (Format: `--api-port [HOST:]HOSTPORT[@NODEFILTER]`\n - Example: `k3d create -m 3 -a 0.0.0.0:6550@master[0] -a 0.0.0.0:6551@master[1]` ")
+ cmd.Flags().StringP("api-port", "a", k3d.DefaultAPIPort, "Specify the Kubernetes API server port (Format: `--api-port [HOST:]HOSTPORT`\n - Example: `k3d create -m 3 -a 0.0.0.0:6550` ")
  cmd.Flags().IntP("masters", "m", 1, "Specify how many masters you want to create")
  cmd.Flags().IntP("workers", "w", 0, "Specify how many workers you want to create")
  cmd.Flags().String("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
@ -86,10 +86,6 @@ func NewCmdCreateCluster() *cobra.Command {
  cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")
  /* Multi Master Configuration */
  // multi-master - general
- // TODO: implement load-balancer/proxy for multi-master setups
- cmd.Flags().BoolVar(&createClusterOpts.DisableLoadbalancer, "no-lb", false, "[WIP] Disable automatic deployment of a load balancer in Multi-Master setups")
- cmd.Flags().String("lb-port", "0.0.0.0:6443", "[WIP] Specify port to be exposed by the master load balancer (Format: `[HOST:]HOSTPORT)")
  // multi-master - datastore
  // TODO: implement multi-master setups with external data store
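With the load balancer in place, multi-master access no longer needs per-node port mappings, so the `@NODEFILTER` syntax and the separate `--lb-port` flag go away: a single `--api-port` describes the one endpoint the whole cluster is reached through. A typical invocation after this change (cluster name and port are illustrative):

    k3d create cluster mycluster --masters 3 --api-port 0.0.0.0:6550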
@ -177,65 +173,22 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
  }
  // --api-port
- apiPortFlags, err := cmd.Flags().GetStringArray("api-port")
+ apiPort, err := cmd.Flags().GetString("api-port")
  if err != nil {
    log.Fatalln(err)
  }
- // error out if we have more api-ports than masters specified
- if len(apiPortFlags) > masterCount {
-   log.Fatalf("Cannot expose more api-ports than master nodes exist (%d > %d)", len(apiPortFlags), masterCount)
- }
- ipPortCombinations := map[string]struct{}{} // only for finding duplicates
- apiPortFilters := map[string]struct{}{}     // only for deduplication
- exposeAPIToFiltersMap := map[k3d.ExposeAPI][]string{}
- for _, apiPortFlag := range apiPortFlags {
-   // split the flag value from the node filter
-   apiPortString, filters, err := cliutil.SplitFiltersFromFlag(apiPortFlag)
-   if err != nil {
-     log.Fatalln(err)
-   }
-   // if there's only one master node, we don't need a node filter, but if there's more than one, we need exactly one node filter per api-port flag
-   if len(filters) > 1 || (len(filters) == 0 && masterCount > 1) {
-     log.Fatalf("Exactly one node filter required per '--api-port' flag, but got %d on flag %s", len(filters), apiPortFlag)
-   }
-   // add default, if no filter was set and we only have a single master node
-   if len(filters) == 0 && masterCount == 1 {
-     filters = []string{"master[0]"}
-   }
-   // only one api-port mapping allowed per master node
-   if _, exists := apiPortFilters[filters[0]]; exists {
-     log.Fatalf("Cannot assign multiple api-port mappings to the same node: duplicate '%s'", filters[0])
-   }
-   apiPortFilters[filters[0]] = struct{}{}
-   // parse the port mapping
-   exposeAPI, err := cliutil.ParseAPIPort(apiPortString)
-   if err != nil {
-     log.Fatalln(err)
-   }
-   // error out on duplicates
-   ipPort := fmt.Sprintf("%s:%s", exposeAPI.HostIP, exposeAPI.Port)
-   if _, exists := ipPortCombinations[ipPort]; exists {
-     log.Fatalf("Duplicate IP:PORT combination '%s' for the Api Port is not allowed", ipPort)
-   }
-   ipPortCombinations[ipPort] = struct{}{}
-   // add to map
-   exposeAPIToFiltersMap[exposeAPI] = filters
- }
- // --lb-port
- lbPort, err := cmd.Flags().GetString("lb-port")
+ // parse the port mapping
+ exposeAPI, err := cliutil.ParseAPIPort(apiPort)
  if err != nil {
    log.Fatalln(err)
  }
+ if exposeAPI.Host == "" {
+   exposeAPI.Host = k3d.DefaultAPIHost
+ }
+ if exposeAPI.HostIP == "" {
+   exposeAPI.HostIP = k3d.DefaultAPIHost
+ }
  // --datastore-endpoint
  datastoreEndpoint, err := cmd.Flags().GetString("datastore-endpoint")
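`cliutil.ParseAPIPort` is not shown in this diff; judging from the `[HOST:]HOSTPORT` format documented on the flag and the `Host`/`HostIP`/`Port` fields used above, it presumably does something like the following sketch (function name, package path, and body are assumptions, not the actual implementation):

    package cliutil // sketch only: the real helper is not part of this diff

    import (
        "fmt"
        "net"
        "strings"

        k3d "github.com/rancher/k3d/pkg/types" // assumed import path
    )

    // parseAPIPortSketch splits an optional host (name or IP) from the host port.
    func parseAPIPortSketch(portString string) (k3d.ExposeAPI, error) {
        var exposeAPI k3d.ExposeAPI
        split := strings.Split(portString, ":")
        switch len(split) {
        case 1: // only HOSTPORT given, e.g. "6443"
            exposeAPI.Port = split[0]
        case 2: // HOST:HOSTPORT given, e.g. "0.0.0.0:6550"
            exposeAPI.Host = split[0]
            if net.ParseIP(split[0]) != nil { // an IP host can double as the bind address
                exposeAPI.HostIP = split[0]
            }
            exposeAPI.Port = split[1]
        default:
            return exposeAPI, fmt.Errorf("invalid format '%s': expected [HOST:]HOSTPORT", portString)
        }
        // defaulting of Host/HostIP to k3d.DefaultAPIHost stays in the caller, as shown above
        return exposeAPI, nil
    }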
@ -322,6 +275,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
    Network:           network,
    Secret:            secret,
    CreateClusterOpts: createClusterOpts,
+   ExposeAPI:         exposeAPI,
  }
// generate list of nodes
@ -339,7 +293,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
      MasterOpts: k3d.MasterOpts{},
    }
-   // TODO: by default, we don't expose an API port, even if we only have a single master: should we change that?
+   // TODO: by default, we don't expose an API port: should we change that?
    // -> if we want to change that, simply add the exposeAPI struct here
    // first master node will be init node if we have more than one master specified but no external datastore
@ -366,20 +320,6 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
    cluster.Nodes = append(cluster.Nodes, &node)
  }
- // add masterOpts
- for exposeAPI, filters := range exposeAPIToFiltersMap {
-   nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
-   if err != nil {
-     log.Fatalln(err)
-   }
-   for _, node := range nodes {
-     if node.Role != k3d.MasterRole {
-       log.Fatalf("Node returned by filters '%+v' for exposing the API is not a master node", filters)
-     }
-     node.MasterOpts.ExposeAPI = exposeAPI
-   }
- }
  // append volumes
  for volume, filters := range volumeFilterMap {
    nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
@ -408,15 +348,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
  /**********************
   * Utility Containers *
   **********************/
- // TODO: create load balancer and other util containers // TODO: for now, this will only work with the docker provider (?) -> can replace dynamic docker lookup with static traefik config (?)
- if masterCount > 1 && !createClusterOpts.DisableLoadbalancer { // TODO: add traefik to the same network and add traefik labels to the master node containers
-   log.Debugln("Creating LB in front of master nodes")
-   cluster.MasterLoadBalancer = &k3d.ClusterLoadbalancer{
-     Image:       k3d.DefaultLBImage,
-     ExposedPort: lbPort,
-   }
- }
  // ...
  return cluster
 }


@ -117,6 +117,8 @@ func CreateCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
    // node role specific settings
    if node.Role == k3d.MasterRole {
+     node.MasterOpts.ExposeAPI = cluster.ExposeAPI
      // the cluster has an init master node, but its not this one, so connect it to the init node
      if cluster.InitNode != nil && !node.MasterOpts.IsInit {
        node.Args = append(node.Args, "--server", fmt.Sprintf("https://%s:%d", cluster.InitNode.Name, 6443))
@ -189,12 +191,13 @@ func CreateCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
  for _, node := range cluster.Nodes {
    if node.Role == k3d.MasterRole {
-     time.Sleep(1) // FIXME: arbitrary wait for one second to avoid race conditions of masters registering
      // skip the init node here
      if node == cluster.InitNode {
        continue
      }
+     time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of masters registering
      // name suffix
      suffix = masterCount
      masterCount++
@ -212,6 +215,8 @@ func CreateCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
    if node.Role == k3d.MasterRole && cluster.CreateClusterOpts.WaitForMaster >= 0 {
      waitForMasterWaitgroup.Add(1)
      go func(masterNode *k3d.Node) {
+       // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
+       // ... by scanning for this line in logs and restarting the container in case it appears
        log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
        // TODO: it may be better to give endtime=starttime+timeout here so that there is no difference between the instances (go func may be called with a few (milli-)seconds difference)
        err := WaitForNodeLogMessage(runtime, masterNode, "Wrote kubeconfig", (time.Duration(cluster.CreateClusterOpts.WaitForMaster) * time.Second))
@ -244,6 +249,42 @@ func CreateCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
    }
  }
+ /*
+  * Auxiliary Containers
+  */
+ // *** MasterLoadBalancer ***
+ // Generate a comma-separated list of master/server names to pass to the proxy container
+ servers := ""
+ for _, node := range cluster.Nodes {
+   if node.Role == k3d.MasterRole {
+     log.Debugf("Node NAME: %s", node.Name)
+     if servers == "" {
+       servers = node.Name
+     } else {
+       servers = fmt.Sprintf("%s,%s", servers, node.Name)
+     }
+   }
+ }
+ // Create proxy as a modified node with proxyRole
+ lbNode := &k3d.Node{
+   Name:    fmt.Sprintf("%s-%s-masterlb", k3d.DefaultObjectNamePrefix, cluster.Name),
+   Image:   k3d.DefaultLBImage,
+   Ports:   []string{fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)},
+   Env: []string{
+     fmt.Sprintf("SERVERS=%s", servers),
+     fmt.Sprintf("PORT=%s", k3d.DefaultAPIPort),
+   },
+   Role:    k3d.NoRole,
+   Labels:  k3d.DefaultObjectLabels, // TODO: createLoadBalancer: add more expressive labels
+   Network: cluster.Network.Name,
+ }
+ log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
+ if err := CreateNode(lbNode, runtime); err != nil {
+   log.Errorln("Failed to create loadbalancer")
+   return err
+ }
  return nil
 }
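For a three-master cluster named `multimaster` (as created in the test script at the end of this diff), the loop above would hand the proxy container an environment along these lines, assuming the usual `k3d-<cluster>-master-<suffix>` node naming:

    SERVERS=k3d-multimaster-master-0,k3d-multimaster-master-1,k3d-multimaster-master-2
    PORT=6443

Building the list with repeated `fmt.Sprintf` works; collecting the names in a slice and using `strings.Join` would be the more idiomatic alternative.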


@ -37,6 +37,7 @@ import (
 // GetKubeconfig grabs the kubeconfig file from /output from a master node container and puts it into a local directory
 func GetKubeconfig(runtime runtimes.Runtime, cluster *k3d.Cluster) ([]byte, error) {
  // get all master nodes for the selected cluster
+ // TODO: getKubeconfig: we should make sure, that the master node we're trying to getch is actually running
  masterNodes, err := runtime.GetNodesByLabel(map[string]string{"k3d.cluster": cluster.Name, "k3d.role": string(k3d.MasterRole)})
  if err != nil {
    log.Errorln("Failed to get master nodes")
@ -49,8 +50,8 @@ func GetKubeconfig(runtime runtimes.Runtime, cluster *k3d.Cluster) ([]byte, erro
  // prefer a master node, which actually has the port exposed
  var chosenMaster *k3d.Node
  chosenMaster = nil
- APIPort := "6443"      // TODO: use default from types
- APIHost := "localhost" // TODO: use default from types
+ APIPort := k3d.DefaultAPIPort
+ APIHost := k3d.DefaultAPIHost
  for _, master := range masterNodes {
    if _, ok := master.Labels["k3d.master.api.port"]; ok {


@ -125,8 +125,6 @@ func CreateNode(node *k3d.Node, runtime runtimes.Runtime) error {
      return err
    }
    log.Debugf("spec = %+v\n", node)
- } else {
-   return fmt.Errorf("Unknown node role '%s'", node.Role)
  }
/*
@ -164,19 +162,14 @@ func patchMasterSpec(node *k3d.Node) error {
  // role label
  node.Labels["k3d.role"] = string(k3d.MasterRole) // TODO: maybe put those in a global var DefaultMasterNodeSpec?
- // extra settings to expose the API port (if wanted)
- if node.MasterOpts.ExposeAPI.Port != "" {
-   if node.MasterOpts.ExposeAPI.Host == "" {
-     node.MasterOpts.ExposeAPI.Host = "0.0.0.0"
-   }
-   node.Labels["k3d.master.api.hostIP"] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
-   node.Labels["k3d.master.api.host"] = node.MasterOpts.ExposeAPI.Host
-   node.Labels["k3d.master.api.port"] = node.MasterOpts.ExposeAPI.Port
-   node.Args = append(node.Args, "--tls-san", node.MasterOpts.ExposeAPI.Host) // add TLS SAN for non default host name
-   node.Ports = append(node.Ports, fmt.Sprintf("%s:%s:6443/tcp", node.MasterOpts.ExposeAPI.Host, node.MasterOpts.ExposeAPI.Port)) // TODO: get '6443' from defaultport variable
- }
+ // Add labels and TLS SAN for the exposed API
+ // FIXME: For now, the labels concerning the API on the master nodes are only being used for configuring the kubeconfig
+ node.Labels["k3d.master.api.hostIP"] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
+ node.Labels["k3d.master.api.host"] = node.MasterOpts.ExposeAPI.Host
+ node.Labels["k3d.master.api.port"] = node.MasterOpts.ExposeAPI.Port
+ node.Args = append(node.Args, "--tls-san", node.MasterOpts.ExposeAPI.Host) // add TLS SAN for non default host name
  return nil
 }


@ -38,7 +38,7 @@ const DefaultClusterNameMaxLength = 32
 const DefaultK3sImageRepo = "docker.io/rancher/k3s"
 // DefaultLBImage defines the default cluster load balancer image
-const DefaultLBImage = "docker.io/library/traefik:v2.0"
+const DefaultLBImage = "docker.io/iwilltry42/k3d-proxy:v0.0.1"
 // DefaultObjectNamePrefix defines the name prefix for every object created by k3d
 const DefaultObjectNamePrefix = "k3d"
@ -50,7 +50,8 @@ type Role string
 const (
   MasterRole Role = "master"
   WorkerRole Role = "worker"
-  NoRole     Role = "nope"
+  NoRole     Role = "noRole"
+  ProxyRole  Role = "proxy"
 )
// DefaultK3dRoles defines the roles available for nodes
@ -87,13 +88,18 @@ const DefaultConfigDirName = ".k3d" // should end up in $HOME/
 // DefaultKubeconfigPrefix defines the default prefix for kubeconfig files
 const DefaultKubeconfigPrefix = DefaultObjectNamePrefix + "-kubeconfig"
+// DefaultAPIPort defines the default Kubernetes API Port
+const DefaultAPIPort = "6443"
+// DefaultAPIHost defines the default host (IP) for the Kubernetes API
+const DefaultAPIHost = "0.0.0.0"
 // CreateClusterOpts describe a set of options one can set when creating a cluster
 type CreateClusterOpts struct {
-  DisableImageVolume  bool
-  DisableLoadbalancer bool
-  WaitForMaster       int
-  K3sServerArgs       []string
-  K3sAgentArgs        []string
+  DisableImageVolume bool
+  WaitForMaster      int
+  K3sServerArgs      []string
+  K3sAgentArgs       []string
 }
// ClusterNetwork describes a network which a cluster is running in
@ -104,15 +110,15 @@ type ClusterNetwork struct {
 // Cluster describes a k3d cluster
 type Cluster struct {
-  Name               string               `yaml:"name" json:"name,omitempty"`
-  Network            ClusterNetwork       `yaml:"network" json:"network,omitempty"`
-  Secret             string               `yaml:"cluster_secret" json:"clusterSecret,omitempty"`
-  Nodes              []*Node              `yaml:"nodes" json:"nodes,omitempty"`
-  InitNode           *Node                // init master node
-  MasterLoadBalancer *ClusterLoadbalancer `yaml:"master_loadbalancer" json:"masterLoadBalancer,omitempty"`
-  ExternalDatastore  ExternalDatastore    `yaml:"external_datastore" json:"externalDatastore,omitempty"`
-  CreateClusterOpts  *CreateClusterOpts   `yaml:"options" json:"options,omitempty"`
-  ImageVolume        string               `yaml:"image_volume" json:"imageVolume,omitempty"`
+  Name              string             `yaml:"name" json:"name,omitempty"`
+  Network           ClusterNetwork     `yaml:"network" json:"network,omitempty"`
+  Secret            string             `yaml:"cluster_secret" json:"clusterSecret,omitempty"`
+  Nodes             []*Node            `yaml:"nodes" json:"nodes,omitempty"`
+  InitNode          *Node              // init master node
+  ExternalDatastore ExternalDatastore  `yaml:"external_datastore" json:"externalDatastore,omitempty"`
+  CreateClusterOpts *CreateClusterOpts `yaml:"options" json:"options,omitempty"`
+  ExposeAPI         ExposeAPI          `yaml:"expose_api" json:"exposeAPI,omitempty"`
+  ImageVolume       string             `yaml:"image_volume" json:"imageVolume,omitempty"`
 }
// Node describes a k3d node
@ -134,8 +140,8 @@ type Node struct {
 // MasterOpts describes some additional master role specific opts
 type MasterOpts struct {
-  ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
   IsInit    bool      `yaml:"is_initializing_master" json:"isInitializingMaster,omitempty"`
+  ExposeAPI ExposeAPI // filled automatically
 }
// ExternalDatastore describes an external datastore used for HA/multi-master clusters
@ -161,9 +167,3 @@ type WorkerOpts struct{}
 func GetDefaultObjectName(name string) string {
   return fmt.Sprintf("%s-%s", DefaultObjectNamePrefix, name)
 }
-// ClusterLoadbalancer describes a loadbalancer deployed in front of a multi-master cluster
-type ClusterLoadbalancer struct {
-  Image       string
-  ExposedPort string `yaml:"exposed_port" json:"exposedPort,omitempty"`
-}
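The `ExposeAPI` struct itself is outside the visible hunks; reconstructed from the fields used throughout this diff, it presumably looks roughly like this (the yaml/json tags are a guess):

    type ExposeAPI struct {
      Host   string `yaml:"host" json:"host,omitempty"`
      HostIP string `yaml:"host_ip" json:"hostIP,omitempty"`
      Port   string `yaml:"port" json:"port,omitempty"`
    }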

proxy/Dockerfile (new file)

@ -0,0 +1,13 @@
FROM nginx:1.16.0-alpine

RUN apk -U --no-cache add curl ca-certificates \
    && mkdir -p /etc/confd \
    && curl -sLf https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-amd64 > /usr/bin/confd \
    && chmod +x /usr/bin/confd \
    && apk del curl

COPY templates /etc/confd/templates/
COPY conf.d /etc/confd/conf.d/
COPY nginx-proxy /usr/bin/

ENTRYPOINT nginx-proxy
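Built and run on its own, the proxy image needs only the two environment variables and a published port (image name, server names, and port are illustrative):

    docker build -t k3d-proxy proxy/
    docker run -e SERVERS=master-0,master-1 -e PORT=6443 -p 6443:6443 k3d-proxy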

proxy/conf.d/nginx.toml (new file)

@ -0,0 +1,7 @@
[template]
src = "nginx.tmpl"
dest = "/etc/nginx/nginx.conf"
keys = [
  "SERVERS",
  "PORT",
]

proxy/nginx-proxy (new executable file)

@ -0,0 +1,7 @@
#!/bin/sh
# Run confd
confd -onetime -backend env
# Start nginx
nginx -g 'daemon off;'

proxy/templates/nginx.tmpl (new file)

@ -0,0 +1,23 @@
error_log stderr notice;
worker_processes auto;

events {
  multi_accept on;
  use epoll;
  worker_connections 1024;
}

stream {
  upstream kube_apiserver {
    {{ $servers := split (getenv "SERVERS") "," }}{{range $servers}}
    server {{.}}:{{getenv "PORT"}};
    {{end}}
  }
  server {
    listen {{getenv "PORT"}};
    proxy_pass kube_apiserver;
    proxy_timeout 30;
    proxy_connect_timeout 2s;
  }
}
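With the example environment above (three masters, port 6443), confd would render the template into roughly the following nginx.conf (whitespace is illustrative). Because this is an nginx `stream` block, the proxy forwards raw TCP, so TLS still terminates at the k3s servers themselves; that is why `patchMasterSpec` keeps adding the exposed host as a TLS SAN.

    error_log stderr notice;
    worker_processes auto;
    events {
      multi_accept on;
      use epoll;
      worker_connections 1024;
    }
    stream {
      upstream kube_apiserver {
        server k3d-multimaster-master-0:6443;
        server k3d-multimaster-master-1:6443;
        server k3d-multimaster-master-2:6443;
      }
      server {
        listen 6443;
        proxy_pass kube_apiserver;
        proxy_timeout 30;
        proxy_connect_timeout 2s;
      }
    }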


@ -7,7 +7,7 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 source "$CURR_DIR/common.sh"
 info "Creating cluster multimaster..."
-$EXE create cluster "multimaster" --masters 3 --api-port '6443@master[0]' --wait 360 || failed "could not create cluster multimaster"
+$EXE create cluster "multimaster" --masters 3 --api-port 6443 --wait 360 || failed "could not create cluster multimaster"
 info "Checking we have access to the cluster..."
 check_k3d_clusters "multimaster" || failed "error checking cluster"