Proxy: deploy a configurable nginx proxy in front of the cluster
Up to now, we exposed the API port on individual master nodes, which is inconvenient for users and troublesome to maintain during development. Now we create a proxy container that exposes a single port and proxies traffic to all master nodes. Currently this only works with 'k3d create cluster'; the proxy is not updated when masters are added later via 'k3d create node --role master'.
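
To illustrate the idea only (this is not the committed code, which uses nginx rendered by confd as shown in the proxy/ files below): a single TCP listener on the published API port that copies bytes to one of the master nodes. The cluster and node names in this sketch are placeholders.

    // Minimal sketch of the pass-through concept, not the actual k3d implementation.
    package main

    import (
        "io"
        "log"
        "net"
    )

    func main() {
        // Placeholder backends; the real proxy receives the master names via the SERVERS env var.
        masters := []string{"k3d-mycluster-master-0:6443", "k3d-mycluster-master-1:6443"}

        // The single port exposed to the user (cluster.ExposeAPI.Host:Port in the diff below).
        ln, err := net.Listen("tcp", "0.0.0.0:6443")
        if err != nil {
            log.Fatalln(err)
        }

        for i := 0; ; i++ {
            client, err := ln.Accept()
            if err != nil {
                log.Println(err)
                continue
            }
            backend := masters[i%len(masters)] // naive round-robin, like nginx's upstream default
            go func(c net.Conn, target string) {
                defer c.Close()
                upstream, err := net.Dial("tcp", target)
                if err != nil {
                    log.Printf("cannot reach %s: %v", target, err)
                    return
                }
                defer upstream.Close()
                go io.Copy(upstream, c) // client -> master
                io.Copy(c, upstream)    // master -> client
            }(client, backend)
        }
    }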
Parent: cc8399ba63
Commit: a24d6f864e
@@ -69,7 +69,7 @@ func NewCmdCreateCluster() *cobra.Command {
     /*********
      * Flags *
      *********/
-    cmd.Flags().StringArrayP("api-port", "a", []string{"6443"}, "Specify the Kubernetes API server port (Format: `--api-port [HOST:]HOSTPORT[@NODEFILTER]`\n - Example: `k3d create -m 3 -a 0.0.0.0:6550@master[0] -a 0.0.0.0:6551@master[1]` ")
+    cmd.Flags().StringP("api-port", "a", k3d.DefaultAPIPort, "Specify the Kubernetes API server port (Format: `--api-port [HOST:]HOSTPORT`\n - Example: `k3d create -m 3 -a 0.0.0.0:6550` ")
     cmd.Flags().IntP("masters", "m", 1, "Specify how many masters you want to create")
     cmd.Flags().IntP("workers", "w", 0, "Specify how many workers you want to create")
     cmd.Flags().String("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")

@@ -83,10 +83,6 @@ func NewCmdCreateCluster() *cobra.Command {
     cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")

     /* Multi Master Configuration */
-    // multi-master - general
-    // TODO: implement load-balancer/proxy for multi-master setups
-    cmd.Flags().BoolVar(&createClusterOpts.DisableLoadbalancer, "no-lb", false, "[WIP] Disable automatic deployment of a load balancer in Multi-Master setups")
-    cmd.Flags().String("lb-port", "0.0.0.0:6443", "[WIP] Specify port to be exposed by the master load balancer (Format: `[HOST:]HOSTPORT)")

     // multi-master - datastore
     // TODO: implement multi-master setups with external data store

@@ -174,65 +170,22 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
     }

     // --api-port
-    apiPortFlags, err := cmd.Flags().GetStringArray("api-port")
+    apiPort, err := cmd.Flags().GetString("api-port")
     if err != nil {
         log.Fatalln(err)
     }

-    // error out if we have more api-ports than masters specified
-    if len(apiPortFlags) > masterCount {
-        log.Fatalf("Cannot expose more api-ports than master nodes exist (%d > %d)", len(apiPortFlags), masterCount)
-    }
-
-    ipPortCombinations := map[string]struct{}{} // only for finding duplicates
-    apiPortFilters := map[string]struct{}{}     // only for deduplication
-    exposeAPIToFiltersMap := map[k3d.ExposeAPI][]string{}
-    for _, apiPortFlag := range apiPortFlags {
-
-        // split the flag value from the node filter
-        apiPortString, filters, err := cliutil.SplitFiltersFromFlag(apiPortFlag)
-        if err != nil {
-            log.Fatalln(err)
-        }
-
-        // if there's only one master node, we don't need a node filter, but if there's more than one, we need exactly one node filter per api-port flag
-        if len(filters) > 1 || (len(filters) == 0 && masterCount > 1) {
-            log.Fatalf("Exactly one node filter required per '--api-port' flag, but got %d on flag %s", len(filters), apiPortFlag)
-        }
-
-        // add default, if no filter was set and we only have a single master node
-        if len(filters) == 0 && masterCount == 1 {
-            filters = []string{"master[0]"}
-        }
-
-        // only one api-port mapping allowed per master node
-        if _, exists := apiPortFilters[filters[0]]; exists {
-            log.Fatalf("Cannot assign multiple api-port mappings to the same node: duplicate '%s'", filters[0])
-        }
-        apiPortFilters[filters[0]] = struct{}{}
-
-        // parse the port mapping
-        exposeAPI, err := cliutil.ParseAPIPort(apiPortString)
-        if err != nil {
-            log.Fatalln(err)
-        }
-
-        // error out on duplicates
-        ipPort := fmt.Sprintf("%s:%s", exposeAPI.HostIP, exposeAPI.Port)
-        if _, exists := ipPortCombinations[ipPort]; exists {
-            log.Fatalf("Duplicate IP:PORT combination '%s' for the Api Port is not allowed", ipPort)
-        }
-        ipPortCombinations[ipPort] = struct{}{}
-
-        // add to map
-        exposeAPIToFiltersMap[exposeAPI] = filters
-    }
-
-    // --lb-port
-    lbPort, err := cmd.Flags().GetString("lb-port")
+    // parse the port mapping
+    exposeAPI, err := cliutil.ParseAPIPort(apiPort)
     if err != nil {
         log.Fatalln(err)
     }
+    if exposeAPI.Host == "" {
+        exposeAPI.Host = k3d.DefaultAPIHost
+    }
+    if exposeAPI.HostIP == "" {
+        exposeAPI.HostIP = k3d.DefaultAPIHost
+    }

     // --datastore-endpoint
     datastoreEndpoint, err := cmd.Flags().GetString("datastore-endpoint")
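Since all API traffic now goes through one load balancer, the `--api-port` flag is reduced to a single `[HOST:]HOSTPORT` value. The helper that parses it, cliutil.ParseAPIPort, is not part of this diff; the following is only a hypothetical sketch of what such a parser has to return (a Host/HostIP/Port triple like k3d's ExposeAPI), not the actual k3d implementation.

    package main

    import (
        "fmt"
        "strings"
    )

    // exposeAPI mirrors the fields k3d's ExposeAPI struct needs to carry.
    type exposeAPI struct {
        Host   string // host name, also used as TLS SAN
        HostIP string // IP the port is bound to on the docker host
        Port   string
    }

    // parseAPIPort is a hypothetical stand-in for cliutil.ParseAPIPort:
    // it accepts "PORT" or "HOST:PORT" and leaves defaults to the caller.
    func parseAPIPort(flag string) (exposeAPI, error) {
        e := exposeAPI{}
        if strings.Contains(flag, ":") {
            parts := strings.SplitN(flag, ":", 2)
            e.Host, e.HostIP, e.Port = parts[0], parts[0], parts[1] // assumption: HOST given as an IP
        } else {
            e.Port = flag // only a port given; Host/HostIP get k3d.DefaultAPIHost afterwards
        }
        if e.Port == "" {
            return e, fmt.Errorf("no port in api-port flag '%s'", flag)
        }
        return e, nil
    }

    func main() {
        fmt.Println(parseAPIPort("0.0.0.0:6550")) // {0.0.0.0 0.0.0.0 6550} <nil>
        fmt.Println(parseAPIPort("6443"))         // {  6443} <nil>
    }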
@@ -319,6 +272,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
         Network:           network,
         Secret:            secret,
         CreateClusterOpts: createClusterOpts,
+        ExposeAPI:         exposeAPI,
     }

     // generate list of nodes

@@ -336,7 +290,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
             MasterOpts: k3d.MasterOpts{},
         }

-        // TODO: by default, we don't expose an API port, even if we only have a single master: should we change that?
+        // TODO: by default, we don't expose an API port: should we change that?
         // -> if we want to change that, simply add the exposeAPI struct here

         // first master node will be init node if we have more than one master specified but no external datastore

@@ -363,20 +317,6 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
         cluster.Nodes = append(cluster.Nodes, &node)
     }

-    // add masterOpts
-    for exposeAPI, filters := range exposeAPIToFiltersMap {
-        nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
-        if err != nil {
-            log.Fatalln(err)
-        }
-        for _, node := range nodes {
-            if node.Role != k3d.MasterRole {
-                log.Fatalf("Node returned by filters '%+v' for exposing the API is not a master node", filters)
-            }
-            node.MasterOpts.ExposeAPI = exposeAPI
-        }
-    }
-
     // append volumes
     for volume, filters := range volumeFilterMap {
         nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)

@@ -405,15 +345,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
     /**********************
      * Utility Containers *
      **********************/
-
-    // TODO: create load balancer and other util containers // TODO: for now, this will only work with the docker provider (?) -> can replace dynamic docker lookup with static traefik config (?)
-    if masterCount > 1 && !createClusterOpts.DisableLoadbalancer { // TODO: add traefik to the same network and add traefik labels to the master node containers
-        log.Debugln("Creating LB in front of master nodes")
-        cluster.MasterLoadBalancer = &k3d.ClusterLoadbalancer{
-            Image:       k3d.DefaultLBImage,
-            ExposedPort: lbPort,
-        }
-    }
+    // ...

     return cluster
 }
@@ -117,6 +117,8 @@ func CreateCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
         // node role specific settings
         if node.Role == k3d.MasterRole {

+            node.MasterOpts.ExposeAPI = cluster.ExposeAPI
+
             // the cluster has an init master node, but its not this one, so connect it to the init node
             if cluster.InitNode != nil && !node.MasterOpts.IsInit {
                 node.Args = append(node.Args, "--server", fmt.Sprintf("https://%s:%d", cluster.InitNode.Name, 6443))

@@ -245,6 +247,37 @@ initNodeFinished:
         }
     }

+    /*
+     * Auxiliary Containers
+     */
+    // MasterLoadBalancer
+    servers := ""
+    for _, node := range cluster.Nodes {
+        if node.Role == k3d.MasterRole {
+            log.Debugf("Node NAME: %s", node.Name)
+            if servers == "" {
+                servers = node.Name
+            } else {
+                servers = fmt.Sprintf("%s,%s", servers, node.Name)
+            }
+        }
+    }
+    lbNode := &k3d.Node{
+        Name:  fmt.Sprintf("%s-%s-masterlb", k3d.DefaultObjectNamePrefix, cluster.Name),
+        Image: k3d.DefaultLBImage,
+        Ports: []string{fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)},
+        Env: []string{
+            fmt.Sprintf("SERVERS=%s", servers),
+            fmt.Sprintf("PORT=%s", k3d.DefaultAPIPort),
+        },
+        Role:    k3d.NoRole,
+        Labels:  k3d.DefaultObjectLabels, // TODO: createLoadBalancer: add more expressive labels
+        Network: cluster.Network.Name,
+    }
+    if err := CreateNode(lbNode, runtime); err != nil {
+        log.Errorln("Failed to create loadbalancer")
+        return err
+    }

     return nil
 }
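The proxy container gets its upstream list through the SERVERS environment variable (a comma-separated list of master container names) plus the target PORT. A hedged, equivalent way to assemble that environment with strings.Join; the commit itself uses the explicit string concatenation shown above, and the node names here are placeholders:

    package main

    import (
        "fmt"
        "strings"
    )

    type node struct {
        Name string
        Role string
    }

    func main() {
        // Placeholder slice standing in for cluster.Nodes.
        nodes := []node{
            {Name: "k3d-mycluster-master-0", Role: "master"},
            {Name: "k3d-mycluster-worker-0", Role: "worker"},
            {Name: "k3d-mycluster-master-1", Role: "master"},
        }

        // Collect the master names and join them with commas.
        masters := []string{}
        for _, n := range nodes {
            if n.Role == "master" {
                masters = append(masters, n.Name)
            }
        }

        env := []string{
            fmt.Sprintf("SERVERS=%s", strings.Join(masters, ",")),
            "PORT=6443", // k3d.DefaultAPIPort in the diff above
        }
        fmt.Println(env) // [SERVERS=k3d-mycluster-master-0,k3d-mycluster-master-1 PORT=6443]
    }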
@@ -37,6 +37,7 @@ import (
 // GetKubeconfig grabs the kubeconfig file from /output from a master node container and puts it into a local directory
 func GetKubeconfig(runtime runtimes.Runtime, cluster *k3d.Cluster) ([]byte, error) {
     // get all master nodes for the selected cluster
+    // TODO: getKubeconfig: we should make sure, that the master node we're trying to getch is actually running
     masterNodes, err := runtime.GetNodesByLabel(map[string]string{"k3d.cluster": cluster.Name, "k3d.role": string(k3d.MasterRole)})
     if err != nil {
         log.Errorln("Failed to get master nodes")

@@ -49,8 +50,8 @@ func GetKubeconfig(runtime runtimes.Runtime, cluster *k3d.Cluster) ([]byte, erro
     // prefer a master node, which actually has the port exposed
     var chosenMaster *k3d.Node
     chosenMaster = nil
-    APIPort := "6443"      // TODO: use default from types
-    APIHost := "localhost" // TODO: use default from types
+    APIPort := k3d.DefaultAPIPort
+    APIHost := k3d.DefaultAPIHost

     for _, master := range masterNodes {
         if _, ok := master.Labels["k3d.master.api.port"]; ok {

@@ -125,8 +125,6 @@ func CreateNode(node *k3d.Node, runtime runtimes.Runtime) error {
             return err
         }
         log.Debugf("spec = %+v\n", node)
-    } else {
-        return fmt.Errorf("Unknown node role '%s'", node.Role)
     }

     /*

@@ -164,19 +162,14 @@ func patchMasterSpec(node *k3d.Node) error {
     // role label
     node.Labels["k3d.role"] = string(k3d.MasterRole) // TODO: maybe put those in a global var DefaultMasterNodeSpec?

-    // extra settings to expose the API port (if wanted)
-    if node.MasterOpts.ExposeAPI.Port != "" {
-        if node.MasterOpts.ExposeAPI.Host == "" {
-            node.MasterOpts.ExposeAPI.Host = "0.0.0.0"
-        }
-        node.Labels["k3d.master.api.hostIP"] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
+    // Add labels and TLS SAN for the exposed API
+    // FIXME: For now, the labels concerning the API on the master nodes are only being used for configuring the kubeconfig
+    node.Labels["k3d.master.api.hostIP"] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
+    node.Labels["k3d.master.api.host"] = node.MasterOpts.ExposeAPI.Host
+    node.Labels["k3d.master.api.port"] = node.MasterOpts.ExposeAPI.Port

-        node.Labels["k3d.master.api.host"] = node.MasterOpts.ExposeAPI.Host
+    node.Args = append(node.Args, "--tls-san", node.MasterOpts.ExposeAPI.Host) // add TLS SAN for non default host name

-        node.Args = append(node.Args, "--tls-san", node.MasterOpts.ExposeAPI.Host) // add TLS SAN for non default host name
-        node.Labels["k3d.master.api.port"] = node.MasterOpts.ExposeAPI.Port
-        node.Ports = append(node.Ports, fmt.Sprintf("%s:%s:6443/tcp", node.MasterOpts.ExposeAPI.Host, node.MasterOpts.ExposeAPI.Port)) // TODO: get '6443' from defaultport variable
-    }
     return nil
 }
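patchMasterSpec now only records the exposed API endpoint as k3d.master.api.* labels (the port mapping itself moved to the load balancer), and GetKubeconfig earlier in this diff reads the port label back when picking a server address. A rough sketch of that label round trip with made-up values; the exact selection logic in GetKubeconfig may differ:

    package main

    import "fmt"

    func main() {
        // Stand-in for master.Labels on a running master container (placeholder values).
        labels := map[string]string{
            "k3d.master.api.host":   "0.0.0.0",
            "k3d.master.api.hostIP": "0.0.0.0",
            "k3d.master.api.port":   "6443",
        }

        // Defaults, mirroring k3d.DefaultAPIHost / k3d.DefaultAPIPort in GetKubeconfig above.
        APIHost := "0.0.0.0"
        APIPort := "6443"

        if port, ok := labels["k3d.master.api.port"]; ok {
            APIPort = port
            if host, ok := labels["k3d.master.api.host"]; ok {
                APIHost = host // assumption: the host label is consumed the same way
            }
        }

        fmt.Printf("kubeconfig server: https://%s:%s\n", APIHost, APIPort)
    }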
@@ -38,7 +38,7 @@ const DefaultClusterNameMaxLength = 32
 const DefaultK3sImageRepo = "docker.io/rancher/k3s"

 // DefaultLBImage defines the default cluster load balancer image
-const DefaultLBImage = "docker.io/library/traefik:v2.0"
+const DefaultLBImage = "docker.io/iwilltry42/k3d-proxy:v0.0.1"

 // DefaultObjectNamePrefix defines the name prefix for every object created by k3d
 const DefaultObjectNamePrefix = "k3d"

@@ -50,7 +50,8 @@ type Role string
 const (
     MasterRole Role = "master"
     WorkerRole Role = "worker"
-    NoRole     Role = "nope"
+    NoRole     Role = "noRole"
+    ProxyRole  Role = "proxy"
 )

 // DefaultK3dRoles defines the roles available for nodes

@@ -87,13 +88,18 @@ const DefaultConfigDirName = ".k3d" // should end up in $HOME/
 // DefaultKubeconfigPrefix defines the default prefix for kubeconfig files
 const DefaultKubeconfigPrefix = DefaultObjectNamePrefix + "-kubeconfig"

+// DefaultAPIPort defines the default Kubernetes API Port
+const DefaultAPIPort = "6443"
+
+// DefaultAPIHost defines the default host (IP) for the Kubernetes API
+const DefaultAPIHost = "0.0.0.0"
+
 // CreateClusterOpts describe a set of options one can set when creating a cluster
 type CreateClusterOpts struct {
     DisableImageVolume  bool
-    DisableLoadbalancer bool
     WaitForMaster       int
     K3sServerArgs       []string
     K3sAgentArgs        []string
 }

 // ClusterNetwork describes a network which a cluster is running in

@@ -104,14 +110,14 @@ type ClusterNetwork struct {

 // Cluster describes a k3d cluster
 type Cluster struct {
     Name              string             `yaml:"name" json:"name,omitempty"`
     Network           ClusterNetwork     `yaml:"network" json:"network,omitempty"`
     Secret            string             `yaml:"cluster_secret" json:"clusterSecret,omitempty"`
     Nodes             []*Node            `yaml:"nodes" json:"nodes,omitempty"`
     InitNode          *Node              // init master node
-    MasterLoadBalancer *ClusterLoadbalancer `yaml:"master_loadbalancer" json:"masterLoadBalancer,omitempty"`
     ExternalDatastore ExternalDatastore  `yaml:"external_datastore" json:"externalDatastore,omitempty"`
     CreateClusterOpts *CreateClusterOpts `yaml:"options" json:"options,omitempty"`
+    ExposeAPI         ExposeAPI          `yaml:"expose_api" json:"exposeAPI,omitempty"`
 }

 // Node describes a k3d node

@@ -133,8 +139,8 @@ type Node struct {

 // MasterOpts describes some additional master role specific opts
 type MasterOpts struct {
-    ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
     IsInit    bool      `yaml:"is_initializing_master" json:"isInitializingMaster,omitempty"`
+    ExposeAPI ExposeAPI // filled automatically
 }

 // ExternalDatastore describes an external datastore used for HA/multi-master clusters

@@ -160,9 +166,3 @@ type WorkerOpts struct{}
 func GetDefaultObjectName(name string) string {
     return fmt.Sprintf("%s-%s", DefaultObjectNamePrefix, name)
 }
-
-// ClusterLoadbalancer describes a loadbalancer deployed in front of a multi-master cluster
-type ClusterLoadbalancer struct {
-    Image       string
-    ExposedPort string `yaml:"exposed_port" json:"exposedPort,omitempty"`
-}
proxy/Dockerfile (new file, 13 lines)
@@ -0,0 +1,13 @@
+FROM nginx:1.16.0-alpine
+
+RUN apk -U --no-cache add curl ca-certificates\
+    && mkdir -p /etc/confd \
+    && curl -sLf https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-amd64 > /usr/bin/confd \
+    && chmod +x /usr/bin/confd \
+    && apk del curl
+
+COPY templates /etc/confd/templates/
+COPY conf.d /etc/confd/conf.d/
+COPY nginx-proxy /usr/bin/
+
+ENTRYPOINT nginx-proxy
proxy/conf.d/nginx.toml (new file, 7 lines)
@@ -0,0 +1,7 @@
+[template]
+src = "nginx.tmpl"
+dest = "/etc/nginx/nginx.conf"
+keys = [
+  "SERVERS",
+  "PORT",
+]
proxy/nginx-proxy (new executable file, 7 lines)
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Run confd
+confd -onetime -backend env
+
+# Start nginx
+nginx -g 'daemon off;'
proxy/templates/nginx.tmpl (new file, 23 lines)
@@ -0,0 +1,23 @@
+error_log stderr notice;
+
+worker_processes auto;
+events {
+  multi_accept on;
+  use epoll;
+  worker_connections 1024;
+}
+
+stream {
+  upstream kube_apiserver {
+    {{ $servers := split (getenv "SERVERS") "," }}{{range $servers}}
+    server {{.}}:{{getenv "PORT"}};
+    {{end}}
+  }
+
+  server {
+    listen {{getenv "PORT"}};
+    proxy_pass kube_apiserver;
+    proxy_timeout 30;
+    proxy_connect_timeout 2s;
+  }
+}
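At container start, nginx-proxy runs `confd -onetime -backend env`, which renders nginx.tmpl into /etc/nginx/nginx.conf from the SERVERS and PORT variables; getenv and split are confd template functions. The Go sketch below approximates that rendering with text/template and a hand-rolled FuncMap (an approximation for illustration, not confd itself), so you can see the stream block the proxy ends up serving; the server names are placeholders.

    package main

    import (
        "os"
        "strings"
        "text/template"
    )

    // A trimmed-down copy of the stream section of nginx.tmpl above.
    const tmpl = `stream {
      upstream kube_apiserver {
        {{ $servers := split (getenv "SERVERS") "," }}{{range $servers}}
        server {{.}}:{{getenv "PORT"}};
        {{end}}
      }

      server {
        listen {{getenv "PORT"}};
        proxy_pass kube_apiserver;
      }
    }
    `

    func main() {
        // The env vars k3d passes to the lbNode container.
        os.Setenv("SERVERS", "k3d-mycluster-master-0,k3d-mycluster-master-1")
        os.Setenv("PORT", "6443")

        // Stand-ins for confd's getenv/split template functions.
        funcs := template.FuncMap{
            "getenv": os.Getenv,
            "split":  strings.Split,
        }
        t := template.Must(template.New("nginx").Funcs(funcs).Parse(tmpl))
        if err := t.Execute(os.Stdout, nil); err != nil {
            panic(err)
        }
    }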