use server/agent instead of master/worker

parent 26cd8bbb3f
commit ec3f10ec57
@ -43,9 +43,9 @@ import (
const clusterCreateDescription = `
Create a new k3s cluster with containerized nodes (k3s in docker).
Every cluster will consist of one or more containers:
- 1 (or more) master node container (k3s)
- 1 (or more) server node container (k3s)
- (optionally) 1 loadbalancer container as the entrypoint to the cluster (nginx)
- (optionally) 1 (or more) worker node containers (k3s)
- (optionally) 1 (or more) agent node containers (k3s)
`

// NewCmdClusterCreate returns a new cobra command
@ -71,8 +71,8 @@ func NewCmdClusterCreate() *cobra.Command {
|
||||
|
||||
// create cluster
|
||||
if updateDefaultKubeconfig || updateCurrentContext {
|
||||
log.Debugln("'--update-default-kubeconfig set: enabling wait-for-master")
|
||||
cluster.CreateClusterOpts.WaitForMaster = true
|
||||
log.Debugln("'--update-default-kubeconfig set: enabling wait-for-server")
|
||||
cluster.CreateClusterOpts.WaitForServer = true
|
||||
}
|
||||
if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
|
||||
// rollback if creation failed
|
||||
@ -111,28 +111,28 @@ func NewCmdClusterCreate() *cobra.Command {
|
||||
/*********
|
||||
* Flags *
|
||||
*********/
|
||||
cmd.Flags().StringP("api-port", "a", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `--api-port [HOST:]HOSTPORT`)\n - Example: `k3d create -m 3 -a 0.0.0.0:6550`")
|
||||
cmd.Flags().IntP("masters", "m", 1, "Specify how many masters you want to create")
|
||||
cmd.Flags().IntP("workers", "w", 0, "Specify how many workers you want to create")
|
||||
cmd.Flags().String("api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `--api-port [HOST:]HOSTPORT`)\n - Example: `k3d create -m 3 -a 0.0.0.0:6550`")
|
||||
cmd.Flags().IntP("servers", "s", 1, "Specify how many servers you want to create")
|
||||
cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
|
||||
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
|
||||
cmd.Flags().String("network", "", "Join an existing network")
|
||||
cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
|
||||
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d create -w 2 -v /my/path@worker[0,1] -v /tmp/test:/tmp/other@master[0]`")
|
||||
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d create -w 2 -p 8080:80@worker[0] -p 8081@worker[1]`")
|
||||
cmd.Flags().BoolVar(&createClusterOpts.WaitForMaster, "wait", true, "Wait for the master(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
|
||||
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d create -w 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
|
||||
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d create -w 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
|
||||
cmd.Flags().BoolVar(&createClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
|
||||
cmd.Flags().DurationVar(&createClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
|
||||
cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
|
||||
cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (implies --update-default-kubeconfig)")
|
||||
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the master nodes")
|
||||
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
|
||||
|
||||
/* Image Importing */
|
||||
cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")
|
||||
|
||||
/* Multi Master Configuration */
|
||||
/* Multi Server Configuration */
|
||||
|
||||
// multi-master - datastore
|
||||
// TODO: implement multi-master setups with external data store
|
||||
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi master clusters)")
|
||||
// multi-server - datastore
|
||||
// TODO: implement multi-server setups with external data store
|
||||
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi server clusters)")
|
||||
/*
|
||||
cmd.Flags().String("datastore-network", "", "Specify container network where we can find the datastore-endpoint (add a connection)")
|
||||
|
||||
@ -143,8 +143,8 @@ func NewCmdClusterCreate() *cobra.Command {
|
||||
*/
|
||||
|
||||
/* k3s */
|
||||
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on master nodes (new flag per arg)")
|
||||
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on worker nodes (new flag per arg)")
|
||||
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
|
||||
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")
|
||||
|
||||
/* Subcommands */
|
||||
|
||||
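To see the renamed flags end to end, here is a minimal, self-contained cobra sketch. It is not the actual k3d command wiring; the command name and the hard-coded `SetArgs` call are illustrative, and only the `--servers`/`-s` and `--agents`/`-a` definitions mirror the flag registrations above.

```go
// Minimal sketch of the renamed flags (--servers/-s, --agents/-a).
// Not the real k3d command; it only shows how the cobra definitions
// above would be read back.
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "create",
		Run: func(cmd *cobra.Command, args []string) {
			servers, err := cmd.Flags().GetInt("servers")
			if err != nil {
				log.Fatalln(err)
			}
			agents, err := cmd.Flags().GetInt("agents")
			if err != nil {
				log.Fatalln(err)
			}
			fmt.Printf("would create %d server node(s) and %d agent node(s)\n", servers, agents)
		},
	}
	cmd.Flags().IntP("servers", "s", 1, "Specify how many servers you want to create")
	cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")

	// hard-coded arguments, just to make the sketch runnable
	cmd.SetArgs([]string{"--servers", "3", "--agents", "2"})
	if err := cmd.Execute(); err != nil {
		log.Fatalln(err)
	}
}
```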
@ -181,14 +181,14 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
|
||||
image = version.GetK3sVersion(true)
|
||||
}
|
||||
|
||||
// --masters
|
||||
masterCount, err := cmd.Flags().GetInt("masters")
|
||||
// --servers
|
||||
serverCount, err := cmd.Flags().GetInt("servers")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// --workers
|
||||
workerCount, err := cmd.Flags().GetInt("workers")
|
||||
// --agents
|
||||
agentCount, err := cmd.Flags().GetInt("agents")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
@ -203,7 +203,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
|
||||
network.Name = networkName
|
||||
network.External = true
|
||||
}
|
||||
if networkName == "host" && (masterCount+workerCount) > 1 {
|
||||
if networkName == "host" && (serverCount+agentCount) > 1 {
|
||||
log.Fatalln("Can only run a single node in hostnetwork mode")
|
||||
}
|
||||
|
||||
@ -323,31 +323,31 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
|
||||
// generate list of nodes
|
||||
cluster.Nodes = []*k3d.Node{}
|
||||
|
||||
// MasterLoadBalancer
|
||||
// ServerLoadBalancer
|
||||
if !createClusterOpts.DisableLoadBalancer {
|
||||
cluster.MasterLoadBalancer = &k3d.Node{
|
||||
cluster.ServerLoadBalancer = &k3d.Node{
|
||||
Role: k3d.LoadBalancerRole,
|
||||
}
|
||||
}
|
||||
|
||||
/****************
|
||||
* Master Nodes *
|
||||
* Server Nodes *
|
||||
****************/
|
||||
|
||||
for i := 0; i < masterCount; i++ {
|
||||
for i := 0; i < serverCount; i++ {
|
||||
node := k3d.Node{
|
||||
Role: k3d.MasterRole,
|
||||
Role: k3d.ServerRole,
|
||||
Image: image,
|
||||
Args: createClusterOpts.K3sServerArgs,
|
||||
MasterOpts: k3d.MasterOpts{},
|
||||
ServerOpts: k3d.ServerOpts{},
|
||||
}
|
||||
|
||||
// TODO: by default, we don't expose an API port: should we change that?
|
||||
// -> if we want to change that, simply add the exposeAPI struct here
|
||||
|
||||
// first master node will be init node if we have more than one master specified but no external datastore
|
||||
if i == 0 && masterCount > 1 {
|
||||
node.MasterOpts.IsInit = true
|
||||
// first server node will be init node if we have more than one server specified but no external datastore
|
||||
if i == 0 && serverCount > 1 {
|
||||
node.ServerOpts.IsInit = true
|
||||
cluster.InitNode = &node
|
||||
}
|
||||
|
||||
@ -356,12 +356,12 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
|
||||
}
|
||||
|
||||
/****************
|
||||
* Worker Nodes *
|
||||
* Agent Nodes  *
|
||||
****************/
|
||||
|
||||
for i := 0; i < workerCount; i++ {
|
||||
for i := 0; i < agentCount; i++ {
|
||||
node := k3d.Node{
|
||||
Role: k3d.WorkerRole,
|
||||
Role: k3d.AgentRole,
|
||||
Image: image,
|
||||
Args: createClusterOpts.K3sAgentArgs,
|
||||
}
|
||||
@ -381,11 +381,11 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
|
||||
}
|
||||
|
||||
// append ports
|
||||
nodeCount := masterCount + workerCount
|
||||
nodeCount := serverCount + agentCount
|
||||
nodeList := cluster.Nodes
|
||||
if !createClusterOpts.DisableLoadBalancer {
|
||||
nodeCount++
|
||||
nodeList = append(nodeList, cluster.MasterLoadBalancer)
|
||||
nodeList = append(nodeList, cluster.ServerLoadBalancer)
|
||||
}
|
||||
for portmap, filters := range portFilterMap {
|
||||
if len(filters) == 0 && (nodeCount) > 1 {
|
||||
|
@ -103,7 +103,7 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
|
||||
defer tabwriter.Flush()
|
||||
|
||||
if !flags.noHeader {
|
||||
headers := []string{"NAME", "MASTERS", "WORKERS"} // TODO: getCluster: add status column
|
||||
headers := []string{"NAME", "SERVERS", "AGENTS"} // TODO: getCluster: add status column
|
||||
if flags.token {
|
||||
headers = append(headers, "TOKEN")
|
||||
}
|
||||
@ -116,13 +116,13 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
|
||||
k3cluster.SortClusters(clusters)
|
||||
|
||||
for _, cluster := range clusters {
|
||||
masterCount := cluster.MasterCount()
|
||||
workerCount := cluster.WorkerCount()
|
||||
serverCount := cluster.ServerCount()
|
||||
agentCount := cluster.AgentCount()
|
||||
|
||||
if flags.token {
|
||||
fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%s\n", cluster.Name, masterCount, workerCount, cluster.Token)
|
||||
fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%s\n", cluster.Name, serverCount, agentCount, cluster.Token)
|
||||
} else {
|
||||
fmt.Fprintf(tabwriter, "%s\t%d\t%d\n", cluster.Name, masterCount, workerCount)
|
||||
fmt.Fprintf(tabwriter, "%s\t%d\t%d\n", cluster.Name, serverCount, agentCount)
|
||||
}
|
||||
}
|
||||
}
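The NAME/SERVERS/AGENTS header change above feeds a `text/tabwriter` table. A standalone sketch of that output path follows; the cluster data is made up, and only the tab-separated format mirrors the code above.

```go
// Standalone sketch of the tab-separated cluster listing shown above.
// The cluster data is invented; only the NAME/SERVERS/AGENTS columns
// and the "%s\t%d\t%d\n" format mirror the diff.
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	tw := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', 0)
	defer tw.Flush()

	fmt.Fprintln(tw, "NAME\tSERVERS\tAGENTS")
	clusters := []struct {
		name            string
		servers, agents int
	}{
		{"mycluster", 1, 2},
		{"multiserver", 3, 0},
	}
	for _, c := range clusters {
		fmt.Fprintf(tw, "%s\t%d\t%d\n", c.name, c.servers, c.agents)
	}
}
```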
|
||||
|
@ -62,7 +62,7 @@ func NewCmdClusterStart() *cobra.Command {
|
||||
|
||||
// add flags
|
||||
cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
|
||||
cmd.Flags().BoolVar(&startClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) (and loadbalancer) to be ready before returning.")
|
||||
cmd.Flags().BoolVar(&startClusterOpts.WaitForServer, "wait", false, "Wait for the server(s) (and loadbalancer) to be ready before returning.")
|
||||
cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")
|
||||
|
||||
// add subcommands
|
||||
|
@ -57,7 +57,7 @@ func NewCmdNodeCreate() *cobra.Command {
|
||||
|
||||
// add flags
|
||||
cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
|
||||
cmd.Flags().String("role", string(k3d.WorkerRole), "Specify node role [master, worker]")
|
||||
cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
|
||||
if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
|
||||
log.Fatalln("Failed to register flag completion for '--role'", err)
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ func NewCmdNodeStart() *cobra.Command {
|
||||
|
||||
// parseStartNodeCmd parses the command input into variables required to start a node
|
||||
func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
|
||||
// node name // TODO: startNode: allow node filters, e.g. `k3d start nodes mycluster@worker` to start all worker nodes of cluster 'mycluster'
|
||||
// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
|
||||
if len(args) == 0 || len(args[0]) == 0 {
|
||||
log.Fatalln("No node name given")
|
||||
}
|
||||
|
@ -54,7 +54,7 @@ func NewCmdNodeStop() *cobra.Command {
|
||||
|
||||
// parseStopNodeCmd parses the command input into variables required to stop a node
|
||||
func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
|
||||
// node name // TODO: allow node filters, e.g. `k3d stop nodes mycluster@worker` to stop all worker nodes of cluster 'mycluster'
|
||||
// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
|
||||
if len(args) == 0 || len(args[0]) == 0 {
|
||||
log.Fatalln("No node name given")
|
||||
}
|
||||
|
@ -86,7 +86,7 @@ nodeLoop:
|
||||
func ValidArgsNodeRoles(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
|
||||
var completions []string
|
||||
roles := []string{string(k3d.MasterRole), string(k3d.WorkerRole)}
|
||||
roles := []string{string(k3d.ServerRole), string(k3d.AgentRole)}
|
||||
|
||||
for _, role := range roles {
|
||||
if strings.HasPrefix(role, toComplete) {
|
||||
|
@ -34,7 +34,7 @@ import (
|
||||
)
|
||||
|
||||
// Regexp pattern to match node filters
|
||||
var filterRegexp = regexp.MustCompile(`^(?P<group>master|worker|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
|
||||
var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
|
||||
|
||||
// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
|
||||
func SplitFiltersFromFlag(flag string) (string, []string, error) {
|
||||
@ -72,16 +72,16 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
|
||||
}
|
||||
|
||||
// map roles to subsets
|
||||
masterNodes := []*k3d.Node{}
|
||||
workerNodes := []*k3d.Node{}
|
||||
var masterlb *k3d.Node
|
||||
serverNodes := []*k3d.Node{}
|
||||
agentNodes := []*k3d.Node{}
|
||||
var serverlb *k3d.Node
|
||||
for _, node := range nodes {
|
||||
if node.Role == k3d.MasterRole {
|
||||
masterNodes = append(masterNodes, node)
|
||||
} else if node.Role == k3d.WorkerRole {
|
||||
workerNodes = append(workerNodes, node)
|
||||
if node.Role == k3d.ServerRole {
|
||||
serverNodes = append(serverNodes, node)
|
||||
} else if node.Role == k3d.AgentRole {
|
||||
agentNodes = append(agentNodes, node)
|
||||
} else if node.Role == k3d.LoadBalancerRole {
|
||||
masterlb = node
|
||||
serverlb = node
|
||||
}
|
||||
}
|
||||
|
||||
@ -110,12 +110,12 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
|
||||
|
||||
// Choose the group of nodes to operate on
|
||||
groupNodes := []*k3d.Node{}
|
||||
if submatches["group"] == string(k3d.MasterRole) {
|
||||
groupNodes = masterNodes
|
||||
} else if submatches["group"] == string(k3d.WorkerRole) {
|
||||
groupNodes = workerNodes
|
||||
if submatches["group"] == string(k3d.ServerRole) {
|
||||
groupNodes = serverNodes
|
||||
} else if submatches["group"] == string(k3d.AgentRole) {
|
||||
groupNodes = agentNodes
|
||||
} else if submatches["group"] == string(k3d.LoadBalancerRole) {
|
||||
filteredNodes = append(filteredNodes, masterlb)
|
||||
filteredNodes = append(filteredNodes, serverlb)
|
||||
return filteredNodes, nil // early exit if filtered group is the loadbalancer
|
||||
}
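The renamed `group` alternation in `filterRegexp` means filters such as `server[0]`, `agent[1:3]` or `all[*]` now match, while the old `master[0]`/`worker[*]` spellings do not. A small sketch (the pattern is copied verbatim from the line above) makes that concrete:

```go
// Demonstrates the renamed node-filter pattern: the group must now be
// server, agent, loadbalancer or all, optionally followed by a subset
// like [0], [1,2], [1:3] or [*].
package main

import (
	"fmt"
	"regexp"
)

var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)

func main() {
	groupIdx := filterRegexp.SubexpIndex("group")
	subsetIdx := filterRegexp.SubexpIndex("subset")

	for _, filter := range []string{"server[0]", "agent[1,2]", "agent[1:3]", "all[*]", "loadbalancer", "master[0]", "worker[*]"} {
		if m := filterRegexp.FindStringSubmatch(filter); m != nil {
			fmt.Printf("%-14s -> group=%q subset=%q\n", filter, m[groupIdx], m[subsetIdx])
		} else {
			fmt.Printf("%-14s -> no match (old master/worker spelling)\n", filter)
		}
	}
}
```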
|
||||
|
||||
|
@ -7,7 +7,7 @@

## Issues with ZFS

- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-master setups (e.g. `k3d cluster create multimaster --masters 3`) fails, because the initializing master node (server flag `--cluster-init`) errors out with the following log:
- k3s currently has [no support for ZFS](https://github.com/rancher/k3s/issues/66) and thus, creating multi-server setups (e.g. `k3d cluster create multiserver --servers 3`) fails, because the initializing server node (server flag `--cluster-init`) errors out with the following log:
```bash
starting kubernetes: preparing server: start cluster and https: raft_init(): io: create I/O capabilities probe file: posix_allocate: operation not supported on socket
```
@ -25,10 +25,10 @@
- clean up or expand docker root filesystem
- change the kubelet's eviction thresholds upon cluster creation: `k3d cluster create --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`

## Restarting a multi-master cluster or the initializing master node fails
## Restarting a multi-server cluster or the initializing server node fails

- What you do: You create a cluster with more than one master node and later, you either stop `master-0` or stop/start the whole cluster
- What you do: You create a cluster with more than one server node and later, you either stop `server-0` or stop/start the whole cluster
- What fails: After the restart, you cannot connect to the cluster anymore and `kubectl` will give you a lot of errors
- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing master node to go down
- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing server node to go down
- What's the solution: Hopefully, this will be solved by the planned [replacement of dqlite with embedded etcd in k3s](https://github.com/rancher/k3s/pull/1770)
- Related issues: [#262](https://github.com/rancher/k3d/issues/262)
@ -47,7 +47,7 @@ You have several options there:

## Quick Start

Create a cluster named `mycluster` with just a single master node:
Create a cluster named `mycluster` with just a single server node:

```bash
k3d cluster create mycluster
@ -1,9 +1,9 @@
# Defaults

- multiple master nodes
- by default, when `--master` > 1 and no `--datastore-x` option is set, the first master node (master-0) will be the initializing master node
- the initializing master node will have the `--cluster-init` flag appended
- all other master nodes will refer to the initializing master node via `--server https://<init-node>:6443`
- multiple server nodes
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
- API-Ports
- by default, we don't expose any API-Port (no host port mapping)
- kubeconfig
@ -14,8 +14,8 @@ Existing networks won't be managed by k3d together with the cluster lifecycle.
### `host` network

When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`),
you won't be able to create more than **one master node**.
An edge case would be one master node (with agent disabled) and one worker node.
you won't be able to create more than **one server node**.
An edge case would be one server node (with agent disabled) and one agent node.

### `bridge` network

@ -2,5 +2,5 @@ title: Usage
arrange:
- commands.md
- kubeconfig.md
- multimaster.md
- multiserver.md
- guides
@ -14,7 +14,7 @@ k3d
-i, --image # specify which k3s image should be used for the nodes
--k3s-agent-arg # add additional arguments to the k3s agent (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
--k3s-server-arg # add additional arguments to the k3s server (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
-m, --masters # specify how many master nodes you want to create
-s, --servers # specify how many server nodes you want to create
--network # specify a network you want to connect to
--no-image-volume # disable the creation of a volume for storing images (used for the 'k3d load image' command)
-p, --port # add some more port mappings
@ -23,11 +23,11 @@ k3d
--update-default-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
--switch-context # (implies --update-default-kubeconfig) automatically sets the current-context of your default kubeconfig to the new cluster's context
-v, --volume # specify additional bind-mounts
--wait # enable waiting for all master nodes to be ready before returning
-w, --workers # specify how many worker nodes you want to create
--wait # enable waiting for all server nodes to be ready before returning
-a, --agents # specify how many agent nodes you want to create
start CLUSTERNAME # start a (stopped) cluster
-a, --all # start all clusters
--wait # wait for all masters and master-loadbalancer to be up before returning
--wait # wait for all servers and server-loadbalancer to be up before returning
--timeout # maximum waiting time for '--wait' before canceling/returning
stop CLUSTERNAME # stop a cluster
-a, --all # stop all clusters
@ -7,14 +7,14 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh

1. Create a cluster, mapping the ingress port 80 to localhost:8081

`#!bash k3d cluster create --api-port 6550 -p 8081:80@loadbalancer --workers 2`
`#!bash k3d cluster create --api-port 6550 -p 8081:80@loadbalancer --agents 2`

!!! info "Good to know"
- `--api-port 6550` is not required for the example to work. It's used to have `k3s`'s API-Server listening on port 6550 with that port mapped to the host system.
- the port-mapping construct `8081:80@loadbalancer` means
- map port `8081` from the host to port `80` on the container which matches the nodefilter `loadbalancer`
- the `loadbalancer` nodefilter matches only the `masterlb` that's deployed in front of a cluster's master nodes
- all ports exposed on the `masterlb` will be proxied to the same ports on all master nodes in the cluster
- the `loadbalancer` nodefilter matches only the `serverlb` that's deployed in front of a cluster's server nodes
- all ports exposed on the `serverlb` will be proxied to the same ports on all server nodes in the cluster

2. Get the kubeconfig file

@ -54,9 +54,9 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh

## 2. via NodePort

1. Create a cluster, mapping the port 30080 from worker-0 to localhost:8082
1. Create a cluster, mapping the port 30080 from agent-0 to localhost:8082

`#!bash k3d cluster create mycluster -p 8082:30080@worker[0] --workers 2`
`#!bash k3d cluster create mycluster -p 8082:30080@agent[0] --agents 2`

- Note: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport)

@ -1,25 +1,25 @@
# Creating multi-master clusters
# Creating multi-server clusters

!!! info "Important note"
For the best results (and less unexpected issues), choose 1, 3, 5, ... master nodes.
For the best results (and less unexpected issues), choose 1, 3, 5, ... server nodes.

## Embedded dqlite

Create a cluster with 3 master nodes using k3s' embedded dqlite database.
The first master to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other master nodes.
Create a cluster with 3 server nodes using k3s' embedded dqlite database.
The first server to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other server nodes.

```bash
k3d cluster create multimaster --masters 3
k3d cluster create multiserver --servers 3
```

## Adding master nodes to a running cluster
## Adding server nodes to a running cluster

In theory (and also in practice in most cases), this is as easy as executing the following command:

```bash
k3d node create newmaster --cluster multimaster --role master
k3d node create newserver --cluster multiserver --role server
```

!!! important "There's a trap!"
If your cluster was initially created with only a single master node, then this will fail.
That's because the initial master node was not started with the `--cluster-init` flag and thus is not using the dqlite backend.
If your cluster was initially created with only a single server node, then this will fail.
That's because the initial server node was not started with the `--cluster-init` flag and thus is not using the dqlite backend.
@ -69,7 +69,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
if cluster.Network.Name == "host" {
|
||||
useHostNet = true
|
||||
if len(cluster.Nodes) > 1 {
|
||||
return fmt.Errorf("Only one master node supported when using host network")
|
||||
return fmt.Errorf("Only one server node supported when using host network")
|
||||
}
|
||||
}
|
||||
|
||||
@ -119,9 +119,9 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
* Nodes
|
||||
*/
|
||||
|
||||
// Worker defaults (per cluster)
|
||||
// connection url is always the name of the first master node (index 0)
|
||||
connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.MasterRole, 0), k3d.DefaultAPIPort)
|
||||
// agent defaults (per cluster)
|
||||
// connection url is always the name of the first server node (index 0)
|
||||
connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
|
||||
|
||||
nodeSetup := func(node *k3d.Node, suffix int) error {
|
||||
// cluster specific settings
|
||||
@ -139,16 +139,16 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
}
|
||||
|
||||
// node role specific settings
|
||||
if node.Role == k3d.MasterRole {
|
||||
if node.Role == k3d.ServerRole {
|
||||
|
||||
node.MasterOpts.ExposeAPI = cluster.ExposeAPI
|
||||
node.ServerOpts.ExposeAPI = cluster.ExposeAPI
|
||||
|
||||
// the cluster has an init master node, but its not this one, so connect it to the init node
|
||||
if cluster.InitNode != nil && !node.MasterOpts.IsInit {
|
||||
// the cluster has an init server node, but it's not this one, so connect it to the init node
|
||||
if cluster.InitNode != nil && !node.ServerOpts.IsInit {
|
||||
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
|
||||
}
|
||||
|
||||
} else if node.Role == k3d.WorkerRole {
|
||||
} else if node.Role == k3d.AgentRole {
|
||||
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
|
||||
}
|
||||
|
||||
@ -167,41 +167,41 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
}
|
||||
|
||||
// used for node suffixes
|
||||
masterCount := 0
|
||||
workerCount := 0
|
||||
serverCount := 0
|
||||
agentCount := 0
|
||||
suffix := 0
|
||||
|
||||
// create init node first
|
||||
if cluster.InitNode != nil {
|
||||
log.Infoln("Creating initializing master node")
|
||||
log.Infoln("Creating initializing server node")
|
||||
cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init")
|
||||
|
||||
// in case the LoadBalancer was disabled, expose the API Port on the initializing master node
|
||||
// in case the LoadBalancer was disabled, expose the API Port on the initializing server node
|
||||
if cluster.CreateClusterOpts.DisableLoadBalancer {
|
||||
cluster.InitNode.Ports = append(cluster.InitNode.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
|
||||
}
|
||||
|
||||
if err := nodeSetup(cluster.InitNode, masterCount); err != nil {
|
||||
if err := nodeSetup(cluster.InitNode, serverCount); err != nil {
|
||||
return err
|
||||
}
|
||||
masterCount++
|
||||
serverCount++
|
||||
|
||||
// wait for the initnode to come up before doing anything else
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Errorln("Failed to bring up initializing master node in time")
|
||||
log.Errorln("Failed to bring up initializing server node in time")
|
||||
return fmt.Errorf(">>> %w", ctx.Err())
|
||||
default:
|
||||
}
|
||||
log.Debugln("Waiting for initializing master node...")
|
||||
log.Debugln("Waiting for initializing server node...")
|
||||
logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode, time.Time{})
|
||||
if err != nil {
|
||||
if logreader != nil {
|
||||
logreader.Close()
|
||||
}
|
||||
log.Errorln(err)
|
||||
log.Errorln("Failed to get logs from the initializig master node.. waiting for 3 seconds instead")
|
||||
log.Errorln("Failed to get logs from the initializig server node.. waiting for 3 seconds instead")
|
||||
time.Sleep(3 * time.Second)
|
||||
break
|
||||
}
|
||||
@ -210,7 +210,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
nRead, _ := buf.ReadFrom(logreader)
|
||||
logreader.Close()
|
||||
if nRead > 0 && strings.Contains(buf.String(), "Running kubelet") {
|
||||
log.Debugln("Initializing master node is up... continuing")
|
||||
log.Debugln("Initializing server node is up... continuing")
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
@ -218,46 +218,46 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
|
||||
}
|
||||
|
||||
// vars to support waiting for master nodes to be ready
|
||||
waitForMasterWaitgroup, ctx := errgroup.WithContext(ctx)
|
||||
// vars to support waiting for server nodes to be ready
|
||||
waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
// create all other nodes, but skip the init node
|
||||
for _, node := range cluster.Nodes {
|
||||
if node.Role == k3d.MasterRole {
|
||||
if node.Role == k3d.ServerRole {
|
||||
|
||||
// skip the init node here
|
||||
if node == cluster.InitNode {
|
||||
continue
|
||||
} else if masterCount == 0 && cluster.CreateClusterOpts.DisableLoadBalancer {
|
||||
// if this is the first master node and the master loadbalancer is disabled, expose the API Port on this master node
|
||||
} else if serverCount == 0 && cluster.CreateClusterOpts.DisableLoadBalancer {
|
||||
// if this is the first server node and the server loadbalancer is disabled, expose the API Port on this server node
|
||||
node.Ports = append(node.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of masters registering
|
||||
time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering
|
||||
|
||||
// name suffix
|
||||
suffix = masterCount
|
||||
masterCount++
|
||||
suffix = serverCount
|
||||
serverCount++
|
||||
|
||||
} else if node.Role == k3d.WorkerRole {
|
||||
} else if node.Role == k3d.AgentRole {
|
||||
// name suffix
|
||||
suffix = workerCount
|
||||
workerCount++
|
||||
suffix = agentCount
|
||||
agentCount++
|
||||
}
|
||||
if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
|
||||
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
|
||||
if err := nodeSetup(node, suffix); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// asynchronously wait for this master node to be ready (by checking the logs for a specific log mesage)
|
||||
if node.Role == k3d.MasterRole && cluster.CreateClusterOpts.WaitForMaster {
|
||||
masterNode := node
|
||||
waitForMasterWaitgroup.Go(func() error {
|
||||
// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
|
||||
if node.Role == k3d.ServerRole && cluster.CreateClusterOpts.WaitForServer {
|
||||
serverNode := node
|
||||
waitForServerWaitgroup.Go(func() error {
|
||||
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
|
||||
// ... by scanning for this line in logs and restarting the container in case it appears
|
||||
log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
|
||||
return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], time.Time{})
|
||||
log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
|
||||
return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{})
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -265,13 +265,13 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
/*
|
||||
* Auxiliary Containers
|
||||
*/
|
||||
// *** MasterLoadBalancer ***
|
||||
// *** ServerLoadBalancer ***
|
||||
if !cluster.CreateClusterOpts.DisableLoadBalancer {
|
||||
if !useHostNet { // masterlb not supported in hostnetwork mode due to port collisions with master node
|
||||
// Generate a comma-separated list of master/server names to pass to the LB container
|
||||
if !useHostNet { // serverlb not supported in hostnetwork mode due to port collisions with server node
|
||||
// Generate a comma-separated list of server names to pass to the LB container
|
||||
servers := ""
|
||||
for _, node := range cluster.Nodes {
|
||||
if node.Role == k3d.MasterRole {
|
||||
if node.Role == k3d.ServerRole {
|
||||
log.Debugf("Node NAME: %s", node.Name)
|
||||
if servers == "" {
|
||||
servers = node.Name
|
||||
@ -283,16 +283,16 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
|
||||
// generate comma-separated list of extra ports to forward
|
||||
ports := k3d.DefaultAPIPort
|
||||
for _, portString := range cluster.MasterLoadBalancer.Ports {
|
||||
for _, portString := range cluster.ServerLoadBalancer.Ports {
|
||||
split := strings.Split(portString, ":")
|
||||
ports += "," + split[len(split)-1]
|
||||
}
|
||||
|
||||
// Create LB as a modified node with loadbalancerRole
|
||||
lbNode := &k3d.Node{
|
||||
Name: fmt.Sprintf("%s-%s-masterlb", k3d.DefaultObjectNamePrefix, cluster.Name),
|
||||
Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
|
||||
Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()),
|
||||
Ports: append(cluster.MasterLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
|
||||
Ports: append(cluster.ServerLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
|
||||
Env: []string{
|
||||
fmt.Sprintf("SERVERS=%s", servers),
|
||||
fmt.Sprintf("PORTS=%s", ports),
|
||||
@ -307,8 +307,8 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
log.Errorln("Failed to create loadbalancer")
|
||||
return err
|
||||
}
|
||||
if cluster.CreateClusterOpts.WaitForMaster {
|
||||
waitForMasterWaitgroup.Go(func() error {
|
||||
if cluster.CreateClusterOpts.WaitForServer {
|
||||
waitForServerWaitgroup.Go(func() error {
|
||||
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
|
||||
// ... by scanning for this line in logs and restarting the container in case it appears
|
||||
log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
|
||||
@ -316,12 +316,12 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
|
||||
})
|
||||
}
|
||||
} else {
|
||||
log.Infoln("Hostnetwork selected -> Skipping creation of Master LoadBalancer")
|
||||
log.Infoln("Hostnetwork selected -> Skipping creation of server LoadBalancer")
|
||||
}
|
||||
}
|
||||
|
||||
if err := waitForMasterWaitgroup.Wait(); err != nil {
|
||||
log.Errorln("Failed to bring up all master nodes (and loadbalancer) in time. Check the logs:")
|
||||
if err := waitForServerWaitgroup.Wait(); err != nil {
|
||||
log.Errorln("Failed to bring up all server nodes (and loadbalancer) in time. Check the logs:")
|
||||
log.Errorf(">>> %+v", err)
|
||||
return fmt.Errorf("Failed to bring up cluster")
|
||||
}
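Both `ClusterCreate` above and `ClusterStart` below rely on the same renamed `waitForServerWaitgroup` pattern: one goroutine per server node, joined by a single `Wait()`. The following is a stripped-down, runnable sketch of that pattern; the node names are invented and the `waitForNode` stub stands in for `NodeWaitForLogMessage`.

```go
// Stripped-down sketch of the waitForServerWaitgroup pattern: one
// goroutine per server node, a shared context, and a single Wait()
// that surfaces the first error.
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// waitForNode is a stand-in for waiting on a node's "ready" log message.
func waitForNode(ctx context.Context, name string) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend the node became ready
		fmt.Printf("node %s is up\n", name)
		return nil
	case <-ctx.Done():
		return fmt.Errorf("node %s: %w", name, ctx.Err())
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
	for _, name := range []string{"server-0", "server-1", "server-2"} { // illustrative names
		serverNode := name // capture loop variable for the closure
		waitForServerWaitgroup.Go(func() error {
			return waitForNode(ctx, serverNode)
		})
	}

	if err := waitForServerWaitgroup.Wait(); err != nil {
		fmt.Println("Failed to bring up all server nodes in time:", err)
		return
	}
	fmt.Println("all server nodes ready")
}
```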
|
||||
@ -516,16 +516,16 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// vars to support waiting for master nodes to be ready
|
||||
waitForMasterWaitgroup, ctx := errgroup.WithContext(ctx)
|
||||
// vars to support waiting for server nodes to be ready
|
||||
waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
failed := 0
|
||||
var masterlb *k3d.Node
|
||||
var serverlb *k3d.Node
|
||||
for _, node := range cluster.Nodes {
|
||||
|
||||
// skip the LB, because we want to start it last
|
||||
if node.Role == k3d.LoadBalancerRole {
|
||||
masterlb = node
|
||||
serverlb = node
|
||||
continue
|
||||
}
|
||||
|
||||
@ -536,34 +536,34 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
|
||||
continue
|
||||
}
|
||||
|
||||
// asynchronously wait for this master node to be ready (by checking the logs for a specific log mesage)
|
||||
if node.Role == k3d.MasterRole && startClusterOpts.WaitForMaster {
|
||||
masterNode := node
|
||||
waitForMasterWaitgroup.Go(func() error {
|
||||
// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
|
||||
if node.Role == k3d.ServerRole && startClusterOpts.WaitForServer {
|
||||
serverNode := node
|
||||
waitForServerWaitgroup.Go(func() error {
|
||||
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
|
||||
// ... by scanning for this line in logs and restarting the container in case it appears
|
||||
log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
|
||||
return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], start)
|
||||
log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
|
||||
return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], start)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// start masterlb
|
||||
if masterlb != nil {
|
||||
log.Debugln("Starting masterlb...")
|
||||
if err := runtime.StartNode(ctx, masterlb); err != nil { // FIXME: we could run into a nullpointer exception here
|
||||
log.Warningf("Failed to start masterlb '%s': Try to start it manually", masterlb.Name)
|
||||
// start serverlb
|
||||
if serverlb != nil {
|
||||
log.Debugln("Starting serverlb...")
|
||||
if err := runtime.StartNode(ctx, serverlb); err != nil { // FIXME: we could run into a nullpointer exception here
|
||||
log.Warningf("Failed to start serverlb '%s': Try to start it manually", serverlb.Name)
|
||||
failed++
|
||||
}
|
||||
waitForMasterWaitgroup.Go(func() error {
|
||||
waitForServerWaitgroup.Go(func() error {
|
||||
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
|
||||
// ... by scanning for this line in logs and restarting the container in case it appears
|
||||
log.Debugf("Starting to wait for loadbalancer node '%s'", masterlb.Name)
|
||||
return NodeWaitForLogMessage(ctx, runtime, masterlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
|
||||
log.Debugf("Starting to wait for loadbalancer node '%s'", serverlb.Name)
|
||||
return NodeWaitForLogMessage(ctx, runtime, serverlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
|
||||
})
|
||||
}
|
||||
|
||||
if err := waitForMasterWaitgroup.Wait(); err != nil {
|
||||
if err := waitForServerWaitgroup.Wait(); err != nil {
|
||||
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
|
||||
log.Errorln(">>> ", err)
|
||||
return fmt.Errorf("Failed to bring up cluster")
|
||||
|
@ -45,7 +45,7 @@ type WriteKubeConfigOptions struct {
|
||||
}
|
||||
|
||||
// KubeconfigGetWrite ...
|
||||
// 1. fetches the KubeConfig from the first master node retrieved for a given cluster
|
||||
// 1. fetches the KubeConfig from the first server node retrieved for a given cluster
|
||||
// 2. modifies it by updating some fields with cluster-specific information
|
||||
// 3. writes it to the specified output
|
||||
func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {
|
||||
@ -107,45 +107,45 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *
|
||||
|
||||
}
|
||||
|
||||
// KubeconfigGet grabs the kubeconfig file from /output from a master node container,
|
||||
// KubeconfigGet grabs the kubeconfig file from /output from a server node container,
|
||||
// modifies it by updating some fields with cluster-specific information
|
||||
// and returns a Config object for further processing
|
||||
func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
|
||||
// get all master nodes for the selected cluster
|
||||
// TODO: getKubeconfig: we should make sure, that the master node we're trying to fetch from is actually running
|
||||
masterNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.MasterRole)})
|
||||
// get all server nodes for the selected cluster
|
||||
// TODO: getKubeconfig: we should make sure, that the server node we're trying to fetch from is actually running
|
||||
serverNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.ServerRole)})
|
||||
if err != nil {
|
||||
log.Errorln("Failed to get master nodes")
|
||||
log.Errorln("Failed to get server nodes")
|
||||
return nil, err
|
||||
}
|
||||
if len(masterNodes) == 0 {
|
||||
return nil, fmt.Errorf("Didn't find any master node")
|
||||
if len(serverNodes) == 0 {
|
||||
return nil, fmt.Errorf("Didn't find any server node")
|
||||
}
|
||||
|
||||
// prefer a master node, which actually has the port exposed
|
||||
var chosenMaster *k3d.Node
|
||||
chosenMaster = nil
|
||||
// prefer a server node, which actually has the port exposed
|
||||
var chosenServer *k3d.Node
|
||||
chosenServer = nil
|
||||
APIPort := k3d.DefaultAPIPort
|
||||
APIHost := k3d.DefaultAPIHost
|
||||
|
||||
for _, master := range masterNodes {
|
||||
if _, ok := master.Labels[k3d.LabelMasterAPIPort]; ok {
|
||||
chosenMaster = master
|
||||
APIPort = master.Labels[k3d.LabelMasterAPIPort]
|
||||
if _, ok := master.Labels[k3d.LabelMasterAPIHost]; ok {
|
||||
APIHost = master.Labels[k3d.LabelMasterAPIHost]
|
||||
for _, server := range serverNodes {
|
||||
if _, ok := server.Labels[k3d.LabelServerAPIPort]; ok {
|
||||
chosenServer = server
|
||||
APIPort = server.Labels[k3d.LabelServerAPIPort]
|
||||
if _, ok := server.Labels[k3d.LabelServerAPIHost]; ok {
|
||||
APIHost = server.Labels[k3d.LabelServerAPIHost]
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if chosenMaster == nil {
|
||||
chosenMaster = masterNodes[0]
|
||||
if chosenServer == nil {
|
||||
chosenServer = serverNodes[0]
|
||||
}
|
||||
// get the kubeconfig from the first master node
|
||||
reader, err := runtime.GetKubeconfig(ctx, chosenMaster)
|
||||
// get the kubeconfig from the first server node
|
||||
reader, err := runtime.GetKubeconfig(ctx, chosenServer)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get kubeconfig from node '%s'", chosenMaster.Name)
|
||||
log.Errorf("Failed to get kubeconfig from node '%s'", chosenServer.Name)
|
||||
return nil, err
|
||||
}
|
||||
defer reader.Close()
|
||||
|
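`KubeconfigGet` above prefers a server node that carries the renamed `k3d.server.api.port` label and falls back to the first server node otherwise. A compact sketch of that selection follows; the `node` struct and the sample data are invented for illustration, and only the label keys come from this commit.

```go
// Sketch of the "prefer the server node that exposes the API port"
// selection. Only the label keys (k3d.server.api.port / .host) are
// taken from the diff; the struct and data are illustrative.
package main

import "fmt"

type node struct {
	Name   string
	Labels map[string]string
}

const (
	labelServerAPIPort = "k3d.server.api.port"
	labelServerAPIHost = "k3d.server.api.host"
)

func chooseServer(serverNodes []node, defaultHost, defaultPort string) (node, string, string) {
	for _, server := range serverNodes {
		if port, ok := server.Labels[labelServerAPIPort]; ok {
			host := defaultHost
			if h, ok := server.Labels[labelServerAPIHost]; ok {
				host = h
			}
			return server, host, port
		}
	}
	// no node advertises an exposed API port: fall back to the first one
	return serverNodes[0], defaultHost, defaultPort
}

func main() {
	servers := []node{
		{Name: "demo-server-0", Labels: map[string]string{}},
		{Name: "demo-server-1", Labels: map[string]string{labelServerAPIPort: "6550", labelServerAPIHost: "0.0.0.0"}},
	}
	chosen, host, port := chooseServer(servers, "0.0.0.0", "6443")
	fmt.Printf("fetching kubeconfig from %s (API at %s:%s)\n", chosen.Name, host, port)
}
```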
@ -43,23 +43,23 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
|
||||
}
|
||||
|
||||
// find the LoadBalancer for the target cluster
|
||||
masterNodesList := []string{}
|
||||
serverNodesList := []string{}
|
||||
var loadbalancer *k3d.Node
|
||||
for _, node := range cluster.Nodes {
|
||||
if node.Role == k3d.LoadBalancerRole { // get the loadbalancer we want to update
|
||||
loadbalancer = node
|
||||
} else if node.Role == k3d.MasterRole { // create a list of master nodes
|
||||
masterNodesList = append(masterNodesList, node.Name)
|
||||
} else if node.Role == k3d.ServerRole { // create a list of server nodes
|
||||
serverNodesList = append(serverNodesList, node.Name)
|
||||
}
|
||||
}
|
||||
masterNodes := strings.Join(masterNodesList, ",")
|
||||
serverNodes := strings.Join(serverNodesList, ",")
|
||||
if loadbalancer == nil {
|
||||
return fmt.Errorf("Failed to find loadbalancer for cluster '%s'", cluster.Name)
|
||||
}
|
||||
|
||||
log.Debugf("Servers as passed to masterlb: '%s'", masterNodes)
|
||||
log.Debugf("Servers as passed to serverlb: '%s'", serverNodes)
|
||||
|
||||
command := fmt.Sprintf("SERVERS=%s %s", masterNodes, "confd -onetime -backend env && nginx -s reload")
|
||||
command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload")
|
||||
if err := runtime.ExecInNode(ctx, loadbalancer, []string{"sh", "-c", command}); err != nil {
|
||||
if strings.Contains(err.Error(), "host not found in upstream") {
|
||||
log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error())
|
||||
|
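`UpdateLoadbalancerConfig` above boils down to joining the server node names and re-running confd inside the loadbalancer container. The string assembly on its own, with invented node names, looks like this:

```go
// Sketch of how the SERVERS list and the reload command are assembled.
// Node names are invented; the command string mirrors the one passed
// to ExecInNode in the diff.
package main

import (
	"fmt"
	"strings"
)

func main() {
	serverNodesList := []string{"demo-server-0", "demo-server-1", "demo-server-2"}
	serverNodes := strings.Join(serverNodesList, ",")

	command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload")
	fmt.Println(command)
	// would be executed in the serverlb container as: sh -c "<command>"
}
```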
@ -109,17 +109,17 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
|
||||
}
|
||||
}
|
||||
|
||||
if node.Role == k3d.MasterRole {
|
||||
for _, forbiddenCmd := range k3d.DoNotCopyMasterFlags {
|
||||
if node.Role == k3d.ServerRole {
|
||||
for _, forbiddenCmd := range k3d.DoNotCopyServerFlags {
|
||||
for i, cmd := range node.Cmd {
|
||||
// cut out the '--cluster-init' flag as this should only be done by the initializing master node
|
||||
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
|
||||
if cmd == forbiddenCmd {
|
||||
log.Debugf("Dropping '%s' from node's cmd", forbiddenCmd)
|
||||
node.Cmd = append(node.Cmd[:i], node.Cmd[i+1:]...)
|
||||
}
|
||||
}
|
||||
for i, arg := range node.Args {
|
||||
// cut out the '--cluster-init' flag as this should only be done by the initializing master node
|
||||
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
|
||||
if arg == forbiddenCmd {
|
||||
log.Debugf("Dropping '%s' from node's args", forbiddenCmd)
|
||||
node.Args = append(node.Args[:i], node.Args[i+1:]...)
|
||||
@ -132,8 +132,8 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
|
||||
return err
|
||||
}
|
||||
|
||||
// if it's a master node, then update the loadbalancer configuration
|
||||
if node.Role == k3d.MasterRole {
|
||||
// if it's a server node, then update the loadbalancer configuration
|
||||
if node.Role == k3d.ServerRole {
|
||||
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
|
||||
log.Errorln("Failed to update cluster loadbalancer")
|
||||
return err
|
||||
@ -231,12 +231,12 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
|
||||
node.Env = append(node.Env, k3d.DefaultNodeEnv...) // append default node env vars
|
||||
|
||||
// specify options depending on node role
|
||||
if node.Role == k3d.WorkerRole { // TODO: check here AND in CLI or only here?
|
||||
if err := patchWorkerSpec(node); err != nil {
|
||||
if node.Role == k3d.AgentRole { // TODO: check here AND in CLI or only here?
|
||||
if err := patchAgentSpec(node); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if node.Role == k3d.MasterRole {
|
||||
if err := patchMasterSpec(node); err != nil {
|
||||
} else if node.Role == k3d.ServerRole {
|
||||
if err := patchServerSpec(node); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -264,8 +264,8 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) e
|
||||
return err
|
||||
}
|
||||
|
||||
// if it's a master node, then update the loadbalancer configuration
|
||||
if node.Role == k3d.MasterRole {
|
||||
// if it's a server node, then update the loadbalancer configuration
|
||||
if node.Role == k3d.ServerRole {
|
||||
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
|
||||
log.Errorln("Failed to update cluster loadbalancer")
|
||||
return err
|
||||
@ -275,16 +275,16 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// patchWorkerSpec adds worker node specific settings to a node
|
||||
func patchWorkerSpec(node *k3d.Node) error {
|
||||
// patchAgentSpec adds agent node specific settings to a node
|
||||
func patchAgentSpec(node *k3d.Node) error {
|
||||
if node.Cmd == nil {
|
||||
node.Cmd = []string{"agent"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// patchMasterSpec adds worker node specific settings to a node
|
||||
func patchMasterSpec(node *k3d.Node) error {
|
||||
// patchServerSpec adds server node specific settings to a node
|
||||
func patchServerSpec(node *k3d.Node) error {
|
||||
|
||||
// command / arguments
|
||||
if node.Cmd == nil {
|
||||
@ -292,12 +292,12 @@ func patchMasterSpec(node *k3d.Node) error {
|
||||
}
|
||||
|
||||
// Add labels and TLS SAN for the exposed API
|
||||
// FIXME: For now, the labels concerning the API on the master nodes are only being used for configuring the kubeconfig
|
||||
node.Labels[k3d.LabelMasterAPIHostIP] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
|
||||
node.Labels[k3d.LabelMasterAPIHost] = node.MasterOpts.ExposeAPI.Host
|
||||
node.Labels[k3d.LabelMasterAPIPort] = node.MasterOpts.ExposeAPI.Port
|
||||
// FIXME: For now, the labels concerning the API on the server nodes are only being used for configuring the kubeconfig
|
||||
node.Labels[k3d.LabelServerAPIHostIP] = node.ServerOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
|
||||
node.Labels[k3d.LabelServerAPIHost] = node.ServerOpts.ExposeAPI.Host
|
||||
node.Labels[k3d.LabelServerAPIPort] = node.ServerOpts.ExposeAPI.Port
|
||||
|
||||
node.Args = append(node.Args, "--tls-san", node.MasterOpts.ExposeAPI.Host) // add TLS SAN for non default host name
|
||||
node.Args = append(node.Args, "--tls-san", node.ServerOpts.ExposeAPI.Host) // add TLS SAN for non default host name
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -147,15 +147,15 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
|
||||
}
|
||||
}
|
||||
|
||||
// masterOpts
|
||||
masterOpts := k3d.MasterOpts{IsInit: false}
|
||||
// serverOpts
|
||||
serverOpts := k3d.ServerOpts{IsInit: false}
|
||||
for k, v := range containerDetails.Config.Labels {
|
||||
if k == k3d.LabelMasterAPIHostIP {
|
||||
masterOpts.ExposeAPI.HostIP = v
|
||||
} else if k == k3d.LabelMasterAPIHost {
|
||||
masterOpts.ExposeAPI.Host = v
|
||||
} else if k == k3d.LabelMasterAPIPort {
|
||||
masterOpts.ExposeAPI.Port = v
|
||||
if k == k3d.LabelServerAPIHostIP {
|
||||
serverOpts.ExposeAPI.HostIP = v
|
||||
} else if k == k3d.LabelServerAPIHost {
|
||||
serverOpts.ExposeAPI.Host = v
|
||||
} else if k == k3d.LabelServerAPIPort {
|
||||
serverOpts.ExposeAPI.Port = v
|
||||
}
|
||||
}
|
||||
|
||||
@ -187,8 +187,8 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
|
||||
Restart: restart,
|
||||
Labels: labels,
|
||||
Network: clusterNetwork,
|
||||
MasterOpts: masterOpts,
|
||||
WorkerOpts: k3d.WorkerOpts{},
|
||||
ServerOpts: serverOpts,
|
||||
AgentOpts: k3d.AgentOpts{},
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
@ -37,7 +37,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
|
||||
|
||||
inputNode := &k3d.Node{
|
||||
Name: "test",
|
||||
Role: k3d.MasterRole,
|
||||
Role: k3d.ServerRole,
|
||||
Image: "rancher/k3s:v0.9.0",
|
||||
Volumes: []string{"/test:/tmp/test"},
|
||||
Env: []string{"TEST_KEY_1=TEST_VAL_1"},
|
||||
@ -45,7 +45,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
|
||||
Args: []string{"--some-boolflag"},
|
||||
Ports: []string{"0.0.0.0:6443:6443/tcp"},
|
||||
Restart: true,
|
||||
Labels: map[string]string{k3d.LabelRole: string(k3d.MasterRole), "test_key_1": "test_val_1"},
|
||||
Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
|
||||
}
|
||||
|
||||
expectedRepresentation := &NodeInDocker{
|
||||
@ -54,7 +54,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
|
||||
Image: "rancher/k3s:v0.9.0",
|
||||
Env: []string{"TEST_KEY_1=TEST_VAL_1"},
|
||||
Cmd: []string{"server", "--https-listen-port=6443", "--some-boolflag"},
|
||||
Labels: map[string]string{k3d.LabelRole: string(k3d.MasterRole), "test_key_1": "test_val_1"},
|
||||
Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
|
||||
ExposedPorts: nat.PortSet{},
|
||||
},
|
||||
HostConfig: container.HostConfig{
|
||||
|
@ -92,7 +92,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
|
||||
var imageVolume string
|
||||
var ok bool
|
||||
for _, node := range cluster.Nodes {
|
||||
if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
|
||||
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
|
||||
if imageVolume, ok = node.Labels[k3d.LabelImageVolume]; ok {
|
||||
break
|
||||
}
|
||||
@ -162,8 +162,8 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
|
||||
var importWaitgroup sync.WaitGroup
|
||||
for _, tarName := range importTarNames {
|
||||
for _, node := range cluster.Nodes {
|
||||
// only import image in master and worker nodes (i.e. ignoring auxiliary nodes like the master loadbalancer)
|
||||
if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
|
||||
// only import image in server and agent nodes (i.e. ignoring auxiliary nodes like the server loadbalancer)
|
||||
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
|
||||
importWaitgroup.Add(1)
|
||||
go func(node *k3d.Node, wg *sync.WaitGroup, tarPath string) {
|
||||
log.Infof("Importing images from tarball '%s' into node '%s'...", tarPath, node.Name)
|
||||
|
@ -47,10 +47,10 @@ const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools"
// DefaultObjectNamePrefix defines the name prefix for every object created by k3d
const DefaultObjectNamePrefix = "k3d"

// ReadyLogMessageByRole defines the log messages we wait for until a master node is considered ready
// ReadyLogMessageByRole defines the log messages we wait for until a server node is considered ready
var ReadyLogMessageByRole = map[Role]string{
MasterRole: "Wrote kubeconfig",
WorkerRole: "Successfully registered node",
ServerRole: "Wrote kubeconfig",
AgentRole: "Successfully registered node",
LoadBalancerRole: "start worker processes",
}
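As a sketch of how a map like ReadyLogMessageByRole is typically consumed (this assumes the node's logs are exposed as an io.Reader and uses local stand-in types, not the actual k3d runtime API):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

type Role string

const (
	ServerRole Role = "server"
	AgentRole  Role = "agent"
)

// Mirrors the map from the diff: the log message we wait for per role.
var ReadyLogMessageByRole = map[Role]string{
	ServerRole: "Wrote kubeconfig",
	AgentRole:  "Successfully registered node",
}

// waitForReady scans a node's log stream until the role-specific ready
// message appears, or the stream ends.
func waitForReady(role Role, logs io.Reader) error {
	want, ok := ReadyLogMessageByRole[role]
	if !ok {
		return fmt.Errorf("no ready message defined for role %q", role)
	}
	scanner := bufio.NewScanner(logs)
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), want) {
			return nil // node is considered ready
		}
	}
	return fmt.Errorf("log stream ended before %q appeared", want)
}

func main() {
	logs := strings.NewReader("starting k3s\nWrote kubeconfig /output/kubeconfig.yaml\n")
	fmt.Println(waitForReady(ServerRole, logs)) // <nil>
}
```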
@ -59,16 +59,16 @@ type Role string

// existing k3d node roles
const (
MasterRole Role = "master"
WorkerRole Role = "worker"
ServerRole Role = "server"
AgentRole Role = "agent"
NoRole Role = "noRole"
LoadBalancerRole Role = "loadbalancer"
)

// NodeRoles defines the roles available for nodes
var NodeRoles = map[string]Role{
string(MasterRole): MasterRole,
string(WorkerRole): WorkerRole,
string(ServerRole): ServerRole,
string(AgentRole): AgentRole,
string(LoadBalancerRole): LoadBalancerRole,
}
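To make the purpose of NodeRoles concrete: it lets a plain string (for example the value of a `--role` flag) be validated into a typed Role. A small self-contained lookup sketch, using local copies of the constants rather than the real package:

```go
package main

import "fmt"

type Role string

const (
	ServerRole       Role = "server"
	AgentRole        Role = "agent"
	NoRole           Role = "noRole"
	LoadBalancerRole Role = "loadbalancer"
)

// Mirrors NodeRoles from the diff: string -> typed role.
var NodeRoles = map[string]Role{
	string(ServerRole):       ServerRole,
	string(AgentRole):        AgentRole,
	string(LoadBalancerRole): LoadBalancerRole,
}

// parseRole validates a role name, e.g. a --role flag value.
func parseRole(name string) (Role, error) {
	if role, ok := NodeRoles[name]; ok {
		return role, nil
	}
	return NoRole, fmt.Errorf("unknown role %q", name)
}

func main() {
	fmt.Println(parseRole("agent"))  // agent <nil>
	fmt.Println(parseRole("worker")) // noRole unknown role "worker"
}
```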
@ -86,15 +86,15 @@ const (
LabelNetworkExternal string = "k3d.cluster.network.external"
LabelNetwork string = "k3d.cluster.network"
LabelRole string = "k3d.role"
LabelMasterAPIPort string = "k3d.master.api.port"
LabelMasterAPIHost string = "k3d.master.api.host"
LabelMasterAPIHostIP string = "k3d.master.api.hostIP"
LabelServerAPIPort string = "k3d.server.api.port"
LabelServerAPIHost string = "k3d.server.api.host"
LabelServerAPIHostIP string = "k3d.server.api.hostIP"
)

// DefaultRoleCmds maps the node roles to their respective default commands
var DefaultRoleCmds = map[Role][]string{
MasterRole: {"server"},
WorkerRole: {"agent"},
ServerRole: {"server"},
AgentRole: {"agent"},
}

// DefaultTmpfsMounts specifies tmpfs mounts that are required for all k3d nodes
@ -123,15 +123,15 @@ const DefaultAPIPort = "6443"
// DefaultAPIHost defines the default host (IP) for the Kubernetes API
const DefaultAPIHost = "0.0.0.0"

// DoNotCopyMasterFlags defines a list of commands/args that shouldn't be copied from an existing node when adding a similar node to a cluster
var DoNotCopyMasterFlags = []string{
// DoNotCopyServerFlags defines a list of commands/args that shouldn't be copied from an existing node when adding a similar node to a cluster
var DoNotCopyServerFlags = []string{
"--cluster-init",
}
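For illustration, DoNotCopyServerFlags acts as a deny-list for the case where a new server node is created by copying an existing one: one-shot flags like `--cluster-init` must not be inherited. A minimal filtering sketch (the helper name here is hypothetical, not the actual node-create code):

```go
package main

import "fmt"

// Mirrors DoNotCopyServerFlags from the diff.
var DoNotCopyServerFlags = []string{
	"--cluster-init",
}

// filterCopiedArgs drops args that must not be inherited by a new node.
func filterCopiedArgs(args []string) []string {
	filtered := make([]string, 0, len(args))
	for _, arg := range args {
		skip := false
		for _, banned := range DoNotCopyServerFlags {
			if arg == banned {
				skip = true
				break
			}
		}
		if !skip {
			filtered = append(filtered, arg)
		}
	}
	return filtered
}

func main() {
	existing := []string{"--cluster-init", "--tls-san=0.0.0.0"}
	fmt.Println(filterCopiedArgs(existing)) // [--tls-san=0.0.0.0]
}
```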
// ClusterCreateOpts describe a set of options one can set when creating a cluster
type ClusterCreateOpts struct {
DisableImageVolume bool
WaitForMaster bool
WaitForServer bool
Timeout time.Duration
DisableLoadBalancer bool
K3sServerArgs []string
@ -140,7 +140,7 @@ type ClusterCreateOpts struct {

// ClusterStartOpts describe a set of options one can set when (re-)starting a cluster
type ClusterStartOpts struct {
WaitForMaster bool
WaitForServer bool
Timeout time.Duration
}
@ -173,34 +173,34 @@ type Cluster struct {
Network ClusterNetwork `yaml:"network" json:"network,omitempty"`
Token string `yaml:"cluster_token" json:"clusterToken,omitempty"`
Nodes []*Node `yaml:"nodes" json:"nodes,omitempty"`
InitNode *Node // init master node
InitNode *Node // init server node
ExternalDatastore ExternalDatastore `yaml:"external_datastore" json:"externalDatastore,omitempty"`
CreateClusterOpts *ClusterCreateOpts `yaml:"options" json:"options,omitempty"`
ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
MasterLoadBalancer *Node `yaml:"master_loadbalancer" json:"masterLoadBalancer,omitempty"`
ServerLoadBalancer *Node `yaml:"server_loadbalancer" json:"serverLoadBalancer,omitempty"`
ImageVolume string `yaml:"image_volume" json:"imageVolume,omitempty"`
}

// MasterCount return number of master node into cluster
func (c *Cluster) MasterCount() int {
masterCount := 0
// ServerCount return number of server node into cluster
func (c *Cluster) ServerCount() int {
serverCount := 0
for _, node := range c.Nodes {
if node.Role == MasterRole {
masterCount++
if node.Role == ServerRole {
serverCount++
}
}
return masterCount
return serverCount
}

// WorkerCount return number of worker node into cluster
func (c *Cluster) WorkerCount() int {
workerCount := 0
// AgentCount return number of agent node into cluster
func (c *Cluster) AgentCount() int {
agentCount := 0
for _, node := range c.Nodes {
if node.Role == WorkerRole {
workerCount++
if node.Role == AgentRole {
agentCount++
}
}
return workerCount
return agentCount
}
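As a small usage sketch (stand-in types, not the actual k3d code paths): the renamed ServerCount helper is the natural hook for decisions such as "is this a multi-server cluster that wants a loadbalancer in front of the servers"; AgentCount would be used analogously.

```go
package main

import "fmt"

type Role string

const (
	ServerRole Role = "server"
	AgentRole  Role = "agent"
)

type Node struct {
	Name string
	Role Role
}

type Cluster struct {
	Nodes []*Node
}

// ServerCount returns the number of server nodes in the cluster.
func (c *Cluster) ServerCount() int {
	count := 0
	for _, node := range c.Nodes {
		if node.Role == ServerRole {
			count++
		}
	}
	return count
}

// isMultiServer reports whether the cluster has more than one server node,
// which is the case where a loadbalancer in front of the servers is useful.
func (c *Cluster) isMultiServer() bool {
	return c.ServerCount() > 1
}

func main() {
	c := &Cluster{Nodes: []*Node{
		{Name: "k3d-multiserver-server-0", Role: ServerRole},
		{Name: "k3d-multiserver-server-1", Role: ServerRole},
		{Name: "k3d-multiserver-agent-0", Role: AgentRole},
	}}
	fmt.Println(c.ServerCount(), c.isMultiServer()) // 2 true
}
```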
// Node describes a k3d node
@ -216,17 +216,17 @@ type Node struct {
Restart bool `yaml:"restart" json:"restart,omitempty"`
Labels map[string]string // filled automatically
Network string // filled automatically
MasterOpts MasterOpts `yaml:"master_opts" json:"masterOpts,omitempty"`
WorkerOpts WorkerOpts `yaml:"worker_opts" json:"workerOpts,omitempty"`
ServerOpts ServerOpts `yaml:"server_opts" json:"serverOpts,omitempty"`
AgentOpts AgentOpts `yaml:"agent_opts" json:"agentOpts,omitempty"`
}

// MasterOpts describes some additional master role specific opts
type MasterOpts struct {
IsInit bool `yaml:"is_initializing_master" json:"isInitializingMaster,omitempty"`
// ServerOpts describes some additional server role specific opts
type ServerOpts struct {
IsInit bool `yaml:"is_initializing_server" json:"isInitializingServer,omitempty"`
ExposeAPI ExposeAPI // filled automatically
}

// ExternalDatastore describes an external datastore used for HA/multi-master clusters
// ExternalDatastore describes an external datastore used for HA/multi-server clusters
type ExternalDatastore struct {
Endpoint string `yaml:"endpoint" json:"endpoint,omitempty"`
CAFile string `yaml:"ca_file" json:"caFile,omitempty"`
@ -242,8 +242,8 @@ type ExposeAPI struct {
Port string `yaml:"port" json:"port"`
}

// WorkerOpts describes some additional worker role specific opts
type WorkerOpts struct{}
// AgentOpts describes some additional agent role specific opts
type AgentOpts struct{}

// GetDefaultObjectName prefixes the passed name with the default prefix
func GetDefaultObjectName(name string) string {
@ -12,7 +12,7 @@ events {

stream {
{{- range $port := $ports }}
upstream master_nodes_{{ $port }} {
upstream server_nodes_{{ $port }} {
{{- range $server := $servers }}
server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s;
{{- end }}
@ -20,7 +20,7 @@ stream {

server {
listen {{ $port }};
proxy_pass master_nodes_{{ $port }};
proxy_pass server_nodes_{{ $port }};
proxy_timeout 600;
proxy_connect_timeout 2s;
}
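For illustration, the snippet above is a Go text/template. A simplified, self-contained rendering sketch (the template string and the Ports/Servers data shape here are assumptions for the example, not the real k3d loadbalancer config) shows how one upstream block per exposed port is produced for the server nodes:

```go
package main

import (
	"os"
	"text/template"
)

// A trimmed-down stand-in for the nginx stream template shown in the diff.
const lbTemplate = `stream {
{{- range $port := .Ports }}
  upstream server_nodes_{{ $port }} {
  {{- range $server := $.Servers }}
    server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s;
  {{- end }}
  }
  server {
    listen {{ $port }};
    proxy_pass server_nodes_{{ $port }};
  }
{{- end }}
}
`

func main() {
	data := struct {
		Ports   []string
		Servers []string
	}{
		Ports:   []string{"6443"},
		Servers: []string{"k3d-multiserver-server-0", "k3d-multiserver-server-1"},
	}
	tmpl := template.Must(template.New("nginx").Parse(lbTemplate))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```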
@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh"
clustername="lifecycletest"

info "Creating cluster $clustername..."
$EXE cluster create "$clustername" --workers 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"
$EXE cluster create "$clustername" --agents 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5
@ -38,9 +38,9 @@ check_clusters "$clustername" || failed "error checking cluster"
info "Checking that we have 2 nodes online..."
check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"

# 4. adding another worker node
info "Adding one worker node..."
$EXE node create "extra-worker" --cluster "$clustername" --role "worker" --wait --timeout 360s || failed "failed to add worker node"
# 4. adding another agent node
info "Adding one agent node..."
$EXE node create "extra-agent" --cluster "$clustername" --role "agent" --wait --timeout 360s || failed "failed to add agent node"

info "Checking that we have 3 nodes available now..."
check_multi_node "$clustername" 3 || failed "failed to verify number of nodes"
@ -6,20 +6,20 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"

info "Creating cluster multimaster..."
$EXE cluster create "multimaster" --masters 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multimaster"
info "Creating cluster multiserver..."
$EXE cluster create "multiserver" --servers 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multiserver"

info "Checking that we have access to the cluster..."
check_clusters "multimaster" || failed "error checking cluster"
check_clusters "multiserver" || failed "error checking cluster"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5

info "Checking that we have 3 master nodes online..."
check_multi_node "multimaster" 3 || failed "failed to verify number of nodes"
info "Checking that we have 3 server nodes online..."
check_multi_node "multiserver" 3 || failed "failed to verify number of nodes"

info "Deleting cluster multimaster..."
$EXE cluster delete "multimaster" || failed "could not delete the cluster multimaster"
info "Deleting cluster multiserver..."
$EXE cluster delete "multiserver" || failed "could not delete the cluster multiserver"

exit 0
thoughts.md
@ -36,10 +36,10 @@ Here's how k3d types should translate to a runtime type:

## Node Configuration

- master node(s)
- server node(s)
- ENV
- `K3S_CLUSTER_INIT`
- if num_masters > 1 && no external datastore configured
- if num_servers > 1 && no external datastore configured
- `K3S_KUBECONFIG_OUTPUT`
- k3d default -> `/output/kubeconfig.yaml`
- CMD/ARGS
@ -65,9 +65,9 @@ Here's how k3d types should translate to a runtime type:
- `privileged`
- Network
- cluster network or external/inherited
- worker nodes
- agent nodes
- ENV
- `K3S_URL` to connect to master node
- `K3S_URL` to connect to server node
- server hostname + port (6443)
- cluster-specific or inherited
- CMD/ARGS
@ -81,23 +81,23 @@ Here's how k3d types should translate to a runtime type:

- `--port [host:]port[:containerPort][/protocol][@group_identifier[[index] | @node_identifier]`
- Examples:
- `--port 0.0.0.0:8080:8081/tcp@workers` -> whole group
- `--port 80@workers[0]` -> single instance of group by list index
- `--port 80@workers[0,2-3]` -> multiple instances of a group by index lists and ranges
- `--port 80@k3d-test-worker-0` -> single instance by specific node identifier
- `--port 80@k3d-test-master-0@workers[1-5]` -> multiple instances by combination of node and group identifiers
- `--port 0.0.0.0:8080:8081/tcp@agents` -> whole group
- `--port 80@agents[0]` -> single instance of group by list index
- `--port 80@agents[0,2-3]` -> multiple instances of a group by index lists and ranges
- `--port 80@k3d-test-agent-0` -> single instance by specific node identifier
- `--port 80@k3d-test-server-0@agents[1-5]` -> multiple instances by combination of node and group identifiers

- analogous for volumes
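To make the filter syntax above concrete, here is a rough, hypothetical parser sketch for just the group form (`agents[0,2-3]`); it is not k3d's actual `--port`/`--volume` filter handling, which also accepts full node names such as `k3d-test-agent-0`:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var filterRe = regexp.MustCompile(`^([a-z]+)(?:\[([\d,\-\s]+)\])?$`)

// parseNodeFilter splits a filter like "agents[0,2-3]" into its group name
// and the expanded list of indices ("agents" alone means the whole group).
func parseNodeFilter(filter string) (group string, indices []int, err error) {
	m := filterRe.FindStringSubmatch(filter)
	if m == nil {
		return "", nil, fmt.Errorf("invalid node filter %q", filter)
	}
	group = m[1]
	if m[2] == "" {
		return group, nil, nil // no index list: whole group
	}
	for _, part := range strings.Split(m[2], ",") {
		part = strings.TrimSpace(part)
		if lo, hi, ok := strings.Cut(part, "-"); ok {
			start, err1 := strconv.Atoi(lo)
			end, err2 := strconv.Atoi(hi)
			if err1 != nil || err2 != nil || end < start {
				return "", nil, fmt.Errorf("invalid range %q", part)
			}
			for i := start; i <= end; i++ {
				indices = append(indices, i)
			}
			continue
		}
		i, err := strconv.Atoi(part)
		if err != nil {
			return "", nil, fmt.Errorf("invalid index %q", part)
		}
		indices = append(indices, i)
	}
	return group, indices, nil
}

func main() {
	fmt.Println(parseNodeFilter("agents[0,2-3]")) // agents [0 2 3] <nil>
	fmt.Println(parseNodeFilter("servers"))       // servers [] <nil>
}
```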
## [WIP] Multi-Master Setup
## [WIP] Multi-Server Setup

- to make this possible, we always deploy a load-balancer (nginx) in front of the master nodes as an extra container
- to make this possible, we always deploy a load-balancer (nginx) in front of the server nodes as an extra container
- consider that in the kubeconfig file and `--tls-san`

### Variants

- [x] embedded datastore (dqlite)
- if `--masters` > 1 deploy a load-balancer in front of them as an extra container
- if `--servers` > 1 deploy a load-balancer in front of them as an extra container
- [ ] external datastore

## [DONE] Keep State in Docker Labels