use server/agent instead of master/worker
parent 26cd8bbb3f
commit ec3f10ec57
@@ -43,9 +43,9 @@ import (
 const clusterCreateDescription = `
 Create a new k3s cluster with containerized nodes (k3s in docker).
 Every cluster will consist of one or more containers:
-- 1 (or more) master node container (k3s)
+- 1 (or more) server node container (k3s)
 - (optionally) 1 loadbalancer container as the entrypoint to the cluster (nginx)
-- (optionally) 1 (or more) worker node containers (k3s)
+- (optionally) 1 (or more) agent node containers (k3s)
 `

 // NewCmdClusterCreate returns a new cobra command
@@ -71,8 +71,8 @@ func NewCmdClusterCreate() *cobra.Command {

 // create cluster
 if updateDefaultKubeconfig || updateCurrentContext {
-log.Debugln("'--update-default-kubeconfig set: enabling wait-for-master")
+log.Debugln("'--update-default-kubeconfig set: enabling wait-for-server")
-cluster.CreateClusterOpts.WaitForMaster = true
+cluster.CreateClusterOpts.WaitForServer = true
 }
 if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
 // rollback if creation failed
@@ -111,28 +111,28 @@ func NewCmdClusterCreate() *cobra.Command {
 /*********
 * Flags *
 *********/
-cmd.Flags().StringP("api-port", "a", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `--api-port [HOST:]HOSTPORT`)\n - Example: `k3d create -m 3 -a 0.0.0.0:6550`")
+cmd.Flags().String("api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `--api-port [HOST:]HOSTPORT`)\n - Example: `k3d create -m 3 -a 0.0.0.0:6550`")
-cmd.Flags().IntP("masters", "m", 1, "Specify how many masters you want to create")
+cmd.Flags().IntP("servers", "s", 1, "Specify how many servers you want to create")
-cmd.Flags().IntP("workers", "w", 0, "Specify how many workers you want to create")
+cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
 cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
 cmd.Flags().String("network", "", "Join an existing network")
 cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
-cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d create -w 2 -v /my/path@worker[0,1] -v /tmp/test:/tmp/other@master[0]`")
+cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d create -w 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
-cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d create -w 2 -p 8080:80@worker[0] -p 8081@worker[1]`")
+cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d create -w 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
-cmd.Flags().BoolVar(&createClusterOpts.WaitForMaster, "wait", true, "Wait for the master(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
+cmd.Flags().BoolVar(&createClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
 cmd.Flags().DurationVar(&createClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
 cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
 cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (implies --update-default-kubeconfig)")
-cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the master nodes")
+cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")

 /* Image Importing */
 cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")

-/* Multi Master Configuration */
+/* Multi Server Configuration */

-// multi-master - datastore
+// multi-server - datastore
-// TODO: implement multi-master setups with external data store
+// TODO: implement multi-server setups with external data store
-// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi master clusters)")
+// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi server clusters)")
 /*
 cmd.Flags().String("datastore-network", "", "Specify container network where we can find the datastore-endpoint (add a connection)")

@@ -143,8 +143,8 @@ func NewCmdClusterCreate() *cobra.Command {
 */

 /* k3s */
-cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on master nodes (new flag per arg)")
+cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
-cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on worker nodes (new flag per arg)")
+cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")

 /* Subcommands */

@@ -181,14 +181,14 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
 image = version.GetK3sVersion(true)
 }

-// --masters
+// --servers
-masterCount, err := cmd.Flags().GetInt("masters")
+serverCount, err := cmd.Flags().GetInt("servers")
 if err != nil {
 log.Fatalln(err)
 }

-// --workers
+// --agents
-workerCount, err := cmd.Flags().GetInt("workers")
+agentCount, err := cmd.Flags().GetInt("agents")
 if err != nil {
 log.Fatalln(err)
 }
@@ -203,7 +203,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
 network.Name = networkName
 network.External = true
 }
-if networkName == "host" && (masterCount+workerCount) > 1 {
+if networkName == "host" && (serverCount+agentCount) > 1 {
 log.Fatalln("Can only run a single node in hostnetwork mode")
 }

@@ -323,31 +323,31 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
 // generate list of nodes
 cluster.Nodes = []*k3d.Node{}

-// MasterLoadBalancer
+// ServerLoadBalancer
 if !createClusterOpts.DisableLoadBalancer {
-cluster.MasterLoadBalancer = &k3d.Node{
+cluster.ServerLoadBalancer = &k3d.Node{
 Role: k3d.LoadBalancerRole,
 }
 }

 /****************
-* Master Nodes *
+* Server Nodes *
 ****************/

-for i := 0; i < masterCount; i++ {
+for i := 0; i < serverCount; i++ {
 node := k3d.Node{
-Role: k3d.MasterRole,
+Role: k3d.ServerRole,
 Image: image,
 Args: createClusterOpts.K3sServerArgs,
-MasterOpts: k3d.MasterOpts{},
+ServerOpts: k3d.ServerOpts{},
 }

 // TODO: by default, we don't expose an API port: should we change that?
 // -> if we want to change that, simply add the exposeAPI struct here

-// first master node will be init node if we have more than one master specified but no external datastore
+// first server node will be init node if we have more than one server specified but no external datastore
-if i == 0 && masterCount > 1 {
+if i == 0 && serverCount > 1 {
-node.MasterOpts.IsInit = true
+node.ServerOpts.IsInit = true
 cluster.InitNode = &node
 }

@@ -356,12 +356,12 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
 }

 /****************
-* Worker Nodes *
+* Agent Nodes *
 ****************/

-for i := 0; i < workerCount; i++ {
+for i := 0; i < agentCount; i++ {
 node := k3d.Node{
-Role: k3d.WorkerRole,
+Role: k3d.AgentRole,
 Image: image,
 Args: createClusterOpts.K3sAgentArgs,
 }
@@ -381,11 +381,11 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
 }

 // append ports
-nodeCount := masterCount + workerCount
+nodeCount := serverCount + agentCount
 nodeList := cluster.Nodes
 if !createClusterOpts.DisableLoadBalancer {
 nodeCount++
-nodeList = append(nodeList, cluster.MasterLoadBalancer)
+nodeList = append(nodeList, cluster.ServerLoadBalancer)
 }
 for portmap, filters := range portFilterMap {
 if len(filters) == 0 && (nodeCount) > 1 {

@@ -103,7 +103,7 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
 defer tabwriter.Flush()

 if !flags.noHeader {
-headers := []string{"NAME", "MASTERS", "WORKERS"} // TODO: getCluster: add status column
+headers := []string{"NAME", "SERVERS", "AGENTS"} // TODO: getCluster: add status column
 if flags.token {
 headers = append(headers, "TOKEN")
 }
@@ -116,13 +116,13 @@ func PrintClusters(clusters []*k3d.Cluster, flags clusterFlags) {
 k3cluster.SortClusters(clusters)

 for _, cluster := range clusters {
-masterCount := cluster.MasterCount()
+serverCount := cluster.ServerCount()
-workerCount := cluster.WorkerCount()
+agentCount := cluster.AgentCount()

 if flags.token {
-fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%s\n", cluster.Name, masterCount, workerCount, cluster.Token)
+fmt.Fprintf(tabwriter, "%s\t%d\t%d\t%s\n", cluster.Name, serverCount, agentCount, cluster.Token)
 } else {
-fmt.Fprintf(tabwriter, "%s\t%d\t%d\n", cluster.Name, masterCount, workerCount)
+fmt.Fprintf(tabwriter, "%s\t%d\t%d\n", cluster.Name, serverCount, agentCount)
 }
 }
 }

@@ -62,7 +62,7 @@ func NewCmdClusterStart() *cobra.Command {

 // add flags
 cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
-cmd.Flags().BoolVar(&startClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) (and loadbalancer) to be ready before returning.")
+cmd.Flags().BoolVar(&startClusterOpts.WaitForServer, "wait", false, "Wait for the server(s) (and loadbalancer) to be ready before returning.")
 cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")

 // add subcommands

@@ -57,7 +57,7 @@ func NewCmdNodeCreate() *cobra.Command {

 // add flags
 cmd.Flags().Int("replicas", 1, "Number of replicas of this node specification.")
-cmd.Flags().String("role", string(k3d.WorkerRole), "Specify node role [master, worker]")
+cmd.Flags().String("role", string(k3d.AgentRole), "Specify node role [server, agent]")
 if err := cmd.RegisterFlagCompletionFunc("role", util.ValidArgsNodeRoles); err != nil {
 log.Fatalln("Failed to register flag completion for '--role'", err)
 }

@@ -53,7 +53,7 @@ func NewCmdNodeStart() *cobra.Command {

 // parseStartNodeCmd parses the command input into variables required to start a node
 func parseStartNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
-// node name // TODO: startNode: allow node filters, e.g. `k3d start nodes mycluster@worker` to start all worker nodes of cluster 'mycluster'
+// node name // TODO: startNode: allow node filters, e.g. `k3d node start mycluster@agent` to start all agent nodes of cluster 'mycluster'
 if len(args) == 0 || len(args[0]) == 0 {
 log.Fatalln("No node name given")
 }

@@ -54,7 +54,7 @@ func NewCmdNodeStop() *cobra.Command {

 // parseStopNodeCmd parses the command input into variables required to stop a node
 func parseStopNodeCmd(cmd *cobra.Command, args []string) *k3d.Node {
-// node name // TODO: allow node filters, e.g. `k3d stop nodes mycluster@worker` to stop all worker nodes of cluster 'mycluster'
+// node name // TODO: allow node filters, e.g. `k3d node stop mycluster@agent` to stop all agent nodes of cluster 'mycluster'
 if len(args) == 0 || len(args[0]) == 0 {
 log.Fatalln("No node name given")
 }

@@ -86,7 +86,7 @@ nodeLoop:
 func ValidArgsNodeRoles(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {

 var completions []string
-roles := []string{string(k3d.MasterRole), string(k3d.WorkerRole)}
+roles := []string{string(k3d.ServerRole), string(k3d.AgentRole)}

 for _, role := range roles {
 if strings.HasPrefix(role, toComplete) {

@@ -34,7 +34,7 @@ import (
 )

 // Regexp pattern to match node filters
-var filterRegexp = regexp.MustCompile(`^(?P<group>master|worker|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
+var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)

 // SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
 func SplitFiltersFromFlag(flag string) (string, []string, error) {
@@ -72,16 +72,16 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
 }

 // map roles to subsets
-masterNodes := []*k3d.Node{}
+serverNodes := []*k3d.Node{}
-workerNodes := []*k3d.Node{}
+agentNodes := []*k3d.Node{}
-var masterlb *k3d.Node
+var serverlb *k3d.Node
 for _, node := range nodes {
-if node.Role == k3d.MasterRole {
+if node.Role == k3d.ServerRole {
-masterNodes = append(masterNodes, node)
+serverNodes = append(serverNodes, node)
-} else if node.Role == k3d.WorkerRole {
+} else if node.Role == k3d.AgentRole {
-workerNodes = append(workerNodes, node)
+agentNodes = append(agentNodes, node)
 } else if node.Role == k3d.LoadBalancerRole {
-masterlb = node
+serverlb = node
 }
 }

@@ -110,12 +110,12 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {

 // Choose the group of nodes to operate on
 groupNodes := []*k3d.Node{}
-if submatches["group"] == string(k3d.MasterRole) {
+if submatches["group"] == string(k3d.ServerRole) {
-groupNodes = masterNodes
+groupNodes = serverNodes
-} else if submatches["group"] == string(k3d.WorkerRole) {
+} else if submatches["group"] == string(k3d.AgentRole) {
-groupNodes = workerNodes
+groupNodes = agentNodes
 } else if submatches["group"] == string(k3d.LoadBalancerRole) {
-filteredNodes = append(filteredNodes, masterlb)
+filteredNodes = append(filteredNodes, serverlb)
 return filteredNodes, nil // early exit if filtered group is the loadbalancer
 }

@@ -7,7 +7,7 @@

 ## Issues with ZFS

-- k3s currently has [no support for ZFS](ttps://github.com/rancher/k3s/issues/66) and thus, creating multi-master setups (e.g. `k3d cluster create multimaster --masters 3`) fails, because the initializing master node (server flag `--cluster-init`) errors out with the following log:
+- k3s currently has [no support for ZFS](ttps://github.com/rancher/k3s/issues/66) and thus, creating multi-server setups (e.g. `k3d cluster create multiserver --servers 3`) fails, because the initializing server node (server flag `--cluster-init`) errors out with the following log:
 ```bash
 starting kubernetes: preparing server: start cluster and https: raft_init(): io: create I/O capabilities probe file: posix_allocate: operation not supported on socket
 ```
@@ -25,10 +25,10 @@
 - clean up or expand docker root filesystem
 - change the kubelet's eviction thresholds upon cluster creation: `k3d cluster create --k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' --k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'`

-## Restarting a multi-master cluster or the initializing master node fails
+## Restarting a multi-server cluster or the initializing server node fails

-- What you do: You create a cluster with more than one master node and later, you either stop `master-0` or stop/start the whole cluster
+- What you do: You create a cluster with more than one server node and later, you either stop `server-0` or stop/start the whole cluster
 - What fails: After the restart, you cannot connect to the cluster anymore and `kubectl` will give you a lot of errors
-- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing master node to go down
+- What causes this issue: it's a [known issue with dqlite in `k3s`](https://github.com/rancher/k3s/issues/1391) which doesn't allow the initializing server node to go down
 - What's the solution: Hopefully, this will be solved by the planned [replacement of dqlite with embedded etcd in k3s](https://github.com/rancher/k3s/pull/1770)
 - Related issues: [#262](https://github.com/rancher/k3d/issues/262)

@@ -47,7 +47,7 @@ You have several options there:

 ## Quick Start

-Create a cluster named `mycluster` with just a single master node:
+Create a cluster named `mycluster` with just a single server node:

 ```bash
 k3d cluster create mycluster

@@ -1,9 +1,9 @@
 # Defaults

-- multiple master nodes
+- multiple server nodes
-- by default, when `--master` > 1 and no `--datastore-x` option is set, the first master node (master-0) will be the initializing master node
+- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
-- the initializing master node will have the `--cluster-init` flag appended
+- the initializing server node will have the `--cluster-init` flag appended
-- all other master nodes will refer to the initializing master node via `--server https://<init-node>:6443`
+- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
 - API-Ports
 - by default, we don't expose any API-Port (no host port mapping)
 - kubeconfig

@@ -14,8 +14,8 @@ Existing networks won't be managed by k3d together with the cluster lifecycle.
 ### `host` network

 When using the `--network` flag to connect to the host network (i.e. `k3d cluster create --network host`),
-you won't be able to create more than **one master node**.
+you won't be able to create more than **one server node**.
-An edge case would be one master node (with agent disabled) and one worker node.
+An edge case would be one server node (with agent disabled) and one agent node.

 ### `bridge` network

@@ -2,5 +2,5 @@ title: Usage
 arrange:
 - commands.md
 - kubeconfig.md
-- multimaster.md
+- multiserver.md
 - guides

@@ -14,7 +14,7 @@ k3d
 -i, --image # specify which k3s image should be used for the nodes
 --k3s-agent-arg # add additional arguments to the k3s agent (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
 --k3s-server-arg # add additional arguments to the k3s server (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
--m, --masters # specify how many master nodes you want to create
+-m, --servers # specify how many server nodes you want to create
 --network # specify a network you want to connect to
 --no-image-volume # disable the creation of a volume for storing images (used for the 'k3d load image' command)
 -p, --port # add some more port mappings
@@ -23,11 +23,11 @@ k3d
 --update-default-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
 --switch-context # (implies --update-default-kubeconfig) automatically sets the current-context of your default kubeconfig to the new cluster's context
 -v, --volume # specify additional bind-mounts
---wait # enable waiting for all master nodes to be ready before returning
+--wait # enable waiting for all server nodes to be ready before returning
--w, --workers # specify how many worker nodes you want to create
+-a, --agents # specify how many agent nodes you want to create
 start CLUSTERNAME # start a (stopped) cluster
 -a, --all # start all clusters
---wait # wait for all masters and master-loadbalancer to be up before returning
+--wait # wait for all servers and server-loadbalancer to be up before returning
 --timeout # maximum waiting time for '--wait' before canceling/returning
 stop CLUSTERNAME # stop a cluster
 -a, --all # stop all clusters

@@ -7,14 +7,14 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh

 1. Create a cluster, mapping the ingress port 80 to localhost:8081

-`#!bash k3d cluster create --api-port 6550 -p 8081:80@loadbalancer --workers 2`
+`#!bash k3d cluster create --api-port 6550 -p 8081:80@loadbalancer --agents 2`

 !!! info "Good to know"
 - `--api-port 6550` is not required for the example to work. It's used to have `k3s`'s API-Server listening on port 6550 with that port mapped to the host system.
 - the port-mapping construct `8081:80@loadbalancer` means
 - map port `8081` from the host to port `80` on the container which matches the nodefilter `loadbalancer`
-- the `loadbalancer` nodefilter matches only the `masterlb` that's deployed in front of a cluster's master nodes
+- the `loadbalancer` nodefilter matches only the `serverlb` that's deployed in front of a cluster's server nodes
-- all ports exposed on the `masterlb` will be proxied to the same ports on all master nodes in the cluster
+- all ports exposed on the `serverlb` will be proxied to the same ports on all server nodes in the cluster

 2. Get the kubeconfig file

@@ -54,9 +54,9 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh

 ## 2. via NodePort

-1. Create a cluster, mapping the port 30080 from worker-0 to localhost:8082
+1. Create a cluster, mapping the port 30080 from agent-0 to localhost:8082

-`#!bash k3d cluster create mycluster -p 8082:30080@worker[0] --workers 2`
+`#!bash k3d cluster create mycluster -p 8082:30080@agent[0] --agents 2`

 - Note: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport)

@@ -1,25 +1,25 @@
-# Creating multi-master clusters
+# Creating multi-server clusters

 !!! info "Important note"
-For the best results (and less unexpected issues), choose 1, 3, 5, ... master nodes.
+For the best results (and less unexpected issues), choose 1, 3, 5, ... server nodes.

 ## Embedded dqlite

-Create a cluster with 3 master nodes using k3s' embedded dqlite database.
+Create a cluster with 3 server nodes using k3s' embedded dqlite database.
-The first master to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other master nodes.
+The first server to be created will use the `--cluster-init` flag and k3d will wait for it to be up and running before creating (and connecting) the other server nodes.

 ```bash
-k3d cluster create multimaster --masters 3
+k3d cluster create multiserver --servers 3
 ```

-## Adding master nodes to a running cluster
+## Adding server nodes to a running cluster

 In theory (and also in practice in most cases), this is as easy as executing the following command:

 ```bash
-k3d node create newmaster --cluster multimaster --role master
+k3d node create newserver --cluster multiserver --role server
 ```

 !!! important "There's a trap!"
-If your cluster was initially created with only a single master node, then this will fail.
+If your cluster was initially created with only a single server node, then this will fail.
-That's because the initial master node was not started with the `--cluster-init` flag and thus is not using the dqlite backend.
+That's because the initial server node was not started with the `--cluster-init` flag and thus is not using the dqlite backend.

@@ -69,7 +69,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 if cluster.Network.Name == "host" {
 useHostNet = true
 if len(cluster.Nodes) > 1 {
-return fmt.Errorf("Only one master node supported when using host network")
+return fmt.Errorf("Only one server node supported when using host network")
 }
 }

@@ -119,9 +119,9 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 * Nodes
 */

-// Worker defaults (per cluster)
+// agent defaults (per cluster)
-// connection url is always the name of the first master node (index 0)
+// connection url is always the name of the first server node (index 0)
-connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.MasterRole, 0), k3d.DefaultAPIPort)
+connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)

 nodeSetup := func(node *k3d.Node, suffix int) error {
 // cluster specific settings
@@ -139,16 +139,16 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 }

 // node role specific settings
-if node.Role == k3d.MasterRole {
+if node.Role == k3d.ServerRole {

-node.MasterOpts.ExposeAPI = cluster.ExposeAPI
+node.ServerOpts.ExposeAPI = cluster.ExposeAPI

-// the cluster has an init master node, but its not this one, so connect it to the init node
+// the cluster has an init server node, but its not this one, so connect it to the init node
-if cluster.InitNode != nil && !node.MasterOpts.IsInit {
+if cluster.InitNode != nil && !node.ServerOpts.IsInit {
 node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
 }

-} else if node.Role == k3d.WorkerRole {
+} else if node.Role == k3d.AgentRole {
 node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", connectionURL))
 }

@@ -167,41 +167,41 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 }

 // used for node suffices
-masterCount := 0
+serverCount := 0
-workerCount := 0
+agentCount := 0
 suffix := 0

 // create init node first
 if cluster.InitNode != nil {
-log.Infoln("Creating initializing master node")
+log.Infoln("Creating initializing server node")
 cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init")

-// in case the LoadBalancer was disabled, expose the API Port on the initializing master node
+// in case the LoadBalancer was disabled, expose the API Port on the initializing server node
 if cluster.CreateClusterOpts.DisableLoadBalancer {
 cluster.InitNode.Ports = append(cluster.InitNode.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
 }

-if err := nodeSetup(cluster.InitNode, masterCount); err != nil {
+if err := nodeSetup(cluster.InitNode, serverCount); err != nil {
 return err
 }
-masterCount++
+serverCount++

 // wait for the initnode to come up before doing anything else
 for {
 select {
 case <-ctx.Done():
-log.Errorln("Failed to bring up initializing master node in time")
+log.Errorln("Failed to bring up initializing server node in time")
 return fmt.Errorf(">>> %w", ctx.Err())
 default:
 }
-log.Debugln("Waiting for initializing master node...")
+log.Debugln("Waiting for initializing server node...")
 logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode, time.Time{})
 if err != nil {
 if logreader != nil {
 logreader.Close()
 }
 log.Errorln(err)
-log.Errorln("Failed to get logs from the initializig master node.. waiting for 3 seconds instead")
+log.Errorln("Failed to get logs from the initializig server node.. waiting for 3 seconds instead")
 time.Sleep(3 * time.Second)
 break
 }
@@ -210,7 +210,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 nRead, _ := buf.ReadFrom(logreader)
 logreader.Close()
 if nRead > 0 && strings.Contains(buf.String(), "Running kubelet") {
-log.Debugln("Initializing master node is up... continuing")
+log.Debugln("Initializing server node is up... continuing")
 break
 }
 time.Sleep(time.Second)
@@ -218,46 +218,46 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus

 }

-// vars to support waiting for master nodes to be ready
+// vars to support waiting for server nodes to be ready
-waitForMasterWaitgroup, ctx := errgroup.WithContext(ctx)
+waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)

 // create all other nodes, but skip the init node
 for _, node := range cluster.Nodes {
-if node.Role == k3d.MasterRole {
+if node.Role == k3d.ServerRole {

 // skip the init node here
 if node == cluster.InitNode {
 continue
-} else if masterCount == 0 && cluster.CreateClusterOpts.DisableLoadBalancer {
+} else if serverCount == 0 && cluster.CreateClusterOpts.DisableLoadBalancer {
-// if this is the first master node and the master loadbalancer is disabled, expose the API Port on this master node
+// if this is the first server node and the server loadbalancer is disabled, expose the API Port on this server node
 node.Ports = append(node.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
 }

-time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of masters registering
+time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering

 // name suffix
-suffix = masterCount
+suffix = serverCount
-masterCount++
+serverCount++

-} else if node.Role == k3d.WorkerRole {
+} else if node.Role == k3d.AgentRole {
 // name suffix
-suffix = workerCount
+suffix = agentCount
-workerCount++
+agentCount++
 }
-if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
+if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
 if err := nodeSetup(node, suffix); err != nil {
 return err
 }
 }

-// asynchronously wait for this master node to be ready (by checking the logs for a specific log mesage)
+// asynchronously wait for this server node to be ready (by checking the logs for a specific log mesage)
-if node.Role == k3d.MasterRole && cluster.CreateClusterOpts.WaitForMaster {
+if node.Role == k3d.ServerRole && cluster.CreateClusterOpts.WaitForServer {
-masterNode := node
+serverNode := node
-waitForMasterWaitgroup.Go(func() error {
+waitForServerWaitgroup.Go(func() error {
 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
 // ... by scanning for this line in logs and restarting the container in case it appears
-log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
+log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
-return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], time.Time{})
+return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{})
 })
 }
 }
@@ -265,13 +265,13 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 /*
 * Auxiliary Containers
 */
-// *** MasterLoadBalancer ***
+// *** ServerLoadBalancer ***
 if !cluster.CreateClusterOpts.DisableLoadBalancer {
-if !useHostNet { // masterlb not supported in hostnetwork mode due to port collisions with master node
+if !useHostNet { // serverlb not supported in hostnetwork mode due to port collisions with server node
-// Generate a comma-separated list of master/server names to pass to the LB container
+// Generate a comma-separated list of server/server names to pass to the LB container
 servers := ""
 for _, node := range cluster.Nodes {
-if node.Role == k3d.MasterRole {
+if node.Role == k3d.ServerRole {
 log.Debugf("Node NAME: %s", node.Name)
 if servers == "" {
 servers = node.Name
@@ -283,16 +283,16 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus

 // generate comma-separated list of extra ports to forward
 ports := k3d.DefaultAPIPort
-for _, portString := range cluster.MasterLoadBalancer.Ports {
+for _, portString := range cluster.ServerLoadBalancer.Ports {
 split := strings.Split(portString, ":")
 ports += "," + split[len(split)-1]
 }

 // Create LB as a modified node with loadbalancerRole
 lbNode := &k3d.Node{
-Name: fmt.Sprintf("%s-%s-masterlb", k3d.DefaultObjectNamePrefix, cluster.Name),
+Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
 Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()),
-Ports: append(cluster.MasterLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
+Ports: append(cluster.ServerLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
 Env: []string{
 fmt.Sprintf("SERVERS=%s", servers),
 fmt.Sprintf("PORTS=%s", ports),
@@ -307,8 +307,8 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 log.Errorln("Failed to create loadbalancer")
 return err
 }
-if cluster.CreateClusterOpts.WaitForMaster {
+if cluster.CreateClusterOpts.WaitForServer {
-waitForMasterWaitgroup.Go(func() error {
+waitForServerWaitgroup.Go(func() error {
 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
 // ... by scanning for this line in logs and restarting the container in case it appears
 log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
@@ -316,12 +316,12 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 })
 }
 } else {
-log.Infoln("Hostnetwork selected -> Skipping creation of Master LoadBalancer")
+log.Infoln("Hostnetwork selected -> Skipping creation of server LoadBalancer")
 }
 }

-if err := waitForMasterWaitgroup.Wait(); err != nil {
+if err := waitForServerWaitgroup.Wait(); err != nil {
-log.Errorln("Failed to bring up all master nodes (and loadbalancer) in time. Check the logs:")
+log.Errorln("Failed to bring up all server nodes (and loadbalancer) in time. Check the logs:")
 log.Errorf(">>> %+v", err)
 return fmt.Errorf("Failed to bring up cluster")
 }
@@ -516,16 +516,16 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
 defer cancel()
 }

-// vars to support waiting for master nodes to be ready
+// vars to support waiting for server nodes to be ready
-waitForMasterWaitgroup, ctx := errgroup.WithContext(ctx)
+waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)

 failed := 0
-var masterlb *k3d.Node
+var serverlb *k3d.Node
 for _, node := range cluster.Nodes {

 // skip the LB, because we want to start it last
 if node.Role == k3d.LoadBalancerRole {
-masterlb = node
+serverlb = node
 continue
 }

@@ -536,34 +536,34 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
 continue
 }

-// asynchronously wait for this master node to be ready (by checking the logs for a specific log mesage)
+// asynchronously wait for this server node to be ready (by checking the logs for a specific log mesage)
-if node.Role == k3d.MasterRole && startClusterOpts.WaitForMaster {
+if node.Role == k3d.ServerRole && startClusterOpts.WaitForServer {
-masterNode := node
+serverNode := node
-waitForMasterWaitgroup.Go(func() error {
+waitForServerWaitgroup.Go(func() error {
 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
 // ... by scanning for this line in logs and restarting the container in case it appears
-log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
+log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
-return NodeWaitForLogMessage(ctx, runtime, masterNode, k3d.ReadyLogMessageByRole[k3d.MasterRole], start)
+return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], start)
 })
 }
 }

-// start masterlb
+// start serverlb
-if masterlb != nil {
+if serverlb != nil {
-log.Debugln("Starting masterlb...")
+log.Debugln("Starting serverlb...")
-if err := runtime.StartNode(ctx, masterlb); err != nil { // FIXME: we could run into a nullpointer exception here
+if err := runtime.StartNode(ctx, serverlb); err != nil { // FIXME: we could run into a nullpointer exception here
-log.Warningf("Failed to start masterlb '%s': Try to start it manually", masterlb.Name)
+log.Warningf("Failed to start serverlb '%s': Try to start it manually", serverlb.Name)
 failed++
 }
-waitForMasterWaitgroup.Go(func() error {
+waitForServerWaitgroup.Go(func() error {
 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
 // ... by scanning for this line in logs and restarting the container in case it appears
-log.Debugf("Starting to wait for loadbalancer node '%s'", masterlb.Name)
+log.Debugf("Starting to wait for loadbalancer node '%s'", serverlb.Name)
-return NodeWaitForLogMessage(ctx, runtime, masterlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
+return NodeWaitForLogMessage(ctx, runtime, serverlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
 })
 }

-if err := waitForMasterWaitgroup.Wait(); err != nil {
+if err := waitForServerWaitgroup.Wait(); err != nil {
 log.Errorln("Failed to bring up all nodes in time. Check the logs:")
 log.Errorln(">>> ", err)
 return fmt.Errorf("Failed to bring up cluster")
@@ -45,7 +45,7 @@ type WriteKubeConfigOptions struct {
 }

 // KubeconfigGetWrite ...
-// 1. fetches the KubeConfig from the first master node retrieved for a given cluster
+// 1. fetches the KubeConfig from the first server node retrieved for a given cluster
 // 2. modifies it by updating some fields with cluster-specific information
 // 3. writes it to the specified output
 func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster, output string, writeKubeConfigOptions *WriteKubeConfigOptions) (string, error) {
@@ -107,45 +107,45 @@ func KubeconfigGetWrite(ctx context.Context, runtime runtimes.Runtime, cluster *

 }

-// KubeconfigGet grabs the kubeconfig file from /output from a master node container,
+// KubeconfigGet grabs the kubeconfig file from /output from a server node container,
 // modifies it by updating some fields with cluster-specific information
 // and returns a Config object for further processing
 func KubeconfigGet(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*clientcmdapi.Config, error) {
-// get all master nodes for the selected cluster
+// get all server nodes for the selected cluster
-// TODO: getKubeconfig: we should make sure, that the master node we're trying to fetch from is actually running
+// TODO: getKubeconfig: we should make sure, that the server node we're trying to fetch from is actually running
-masterNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.MasterRole)})
+serverNodes, err := runtime.GetNodesByLabel(ctx, map[string]string{k3d.LabelClusterName: cluster.Name, k3d.LabelRole: string(k3d.ServerRole)})
 if err != nil {
-log.Errorln("Failed to get master nodes")
+log.Errorln("Failed to get server nodes")
 return nil, err
 }
-if len(masterNodes) == 0 {
+if len(serverNodes) == 0 {
-return nil, fmt.Errorf("Didn't find any master node")
+return nil, fmt.Errorf("Didn't find any server node")
 }

-// prefer a master node, which actually has the port exposed
+// prefer a server node, which actually has the port exposed
-var chosenMaster *k3d.Node
+var chosenServer *k3d.Node
-chosenMaster = nil
+chosenServer = nil
 APIPort := k3d.DefaultAPIPort
 APIHost := k3d.DefaultAPIHost

-for _, master := range masterNodes {
+for _, server := range serverNodes {
-if _, ok := master.Labels[k3d.LabelMasterAPIPort]; ok {
+if _, ok := server.Labels[k3d.LabelServerAPIPort]; ok {
-chosenMaster = master
+chosenServer = server
-APIPort = master.Labels[k3d.LabelMasterAPIPort]
+APIPort = server.Labels[k3d.LabelServerAPIPort]
-if _, ok := master.Labels[k3d.LabelMasterAPIHost]; ok {
+if _, ok := server.Labels[k3d.LabelServerAPIHost]; ok {
-APIHost = master.Labels[k3d.LabelMasterAPIHost]
+APIHost = server.Labels[k3d.LabelServerAPIHost]
 }
 break
 }
 }

-if chosenMaster == nil {
+if chosenServer == nil {
-chosenMaster = masterNodes[0]
+chosenServer = serverNodes[0]
 }
-// get the kubeconfig from the first master node
+// get the kubeconfig from the first server node
-reader, err := runtime.GetKubeconfig(ctx, chosenMaster)
+reader, err := runtime.GetKubeconfig(ctx, chosenServer)
 if err != nil {
-log.Errorf("Failed to get kubeconfig from node '%s'", chosenMaster.Name)
+log.Errorf("Failed to get kubeconfig from node '%s'", chosenServer.Name)
 return nil, err
 }
 defer reader.Close()
@ -43,23 +43,23 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
|
|||||||
}
|
}
|
||||||
|
|
||||||
// find the LoadBalancer for the target cluster
|
// find the LoadBalancer for the target cluster
|
||||||
masterNodesList := []string{}
|
serverNodesList := []string{}
|
||||||
var loadbalancer *k3d.Node
|
var loadbalancer *k3d.Node
|
||||||
for _, node := range cluster.Nodes {
|
for _, node := range cluster.Nodes {
|
||||||
if node.Role == k3d.LoadBalancerRole { // get the loadbalancer we want to update
|
if node.Role == k3d.LoadBalancerRole { // get the loadbalancer we want to update
|
||||||
loadbalancer = node
|
loadbalancer = node
|
||||||
} else if node.Role == k3d.MasterRole { // create a list of master nodes
|
} else if node.Role == k3d.ServerRole { // create a list of server nodes
|
||||||
masterNodesList = append(masterNodesList, node.Name)
|
serverNodesList = append(serverNodesList, node.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
masterNodes := strings.Join(masterNodesList, ",")
|
serverNodes := strings.Join(serverNodesList, ",")
|
||||||
if loadbalancer == nil {
|
if loadbalancer == nil {
|
||||||
return fmt.Errorf("Failed to find loadbalancer for cluster '%s'", cluster.Name)
|
return fmt.Errorf("Failed to find loadbalancer for cluster '%s'", cluster.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("Servers as passed to masterlb: '%s'", masterNodes)
|
log.Debugf("Servers as passed to serverlb: '%s'", serverNodes)
|
||||||
|
|
||||||
command := fmt.Sprintf("SERVERS=%s %s", masterNodes, "confd -onetime -backend env && nginx -s reload")
|
command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload")
|
||||||
if err := runtime.ExecInNode(ctx, loadbalancer, []string{"sh", "-c", command}); err != nil {
|
if err := runtime.ExecInNode(ctx, loadbalancer, []string{"sh", "-c", command}); err != nil {
|
||||||
if strings.Contains(err.Error(), "host not found in upstream") {
|
if strings.Contains(err.Error(), "host not found in upstream") {
|
||||||
log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error())
|
log.Warnf("Loadbalancer configuration updated, but one or more k3d nodes seem to be down, check the logs:\n%s", err.Error())
|
||||||
|
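For readers skimming this hunk, here is a minimal, self-contained sketch of the command string that UpdateLoadbalancerConfig assembles and runs inside the loadbalancer container. The cluster and node names are hypothetical; only the fmt.Sprintf pattern and the confd/nginx invocation are taken from the diff above.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical server node names for a two-server cluster named "mycluster".
	serverNodesList := []string{"k3d-mycluster-server-0", "k3d-mycluster-server-1"}
	serverNodes := strings.Join(serverNodesList, ",")

	// Mirrors the fmt.Sprintf call in the hunk above: confd re-renders the
	// nginx config from the SERVERS env var, then nginx reloads it.
	command := fmt.Sprintf("SERVERS=%s %s", serverNodes, "confd -onetime -backend env && nginx -s reload")
	fmt.Println(command)
	// Output:
	// SERVERS=k3d-mycluster-server-0,k3d-mycluster-server-1 confd -onetime -backend env && nginx -s reload
}
```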
@ -109,17 +109,17 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
 }
 }

-if node.Role == k3d.MasterRole {
+if node.Role == k3d.ServerRole {
-for _, forbiddenCmd := range k3d.DoNotCopyMasterFlags {
+for _, forbiddenCmd := range k3d.DoNotCopyServerFlags {
 for i, cmd := range node.Cmd {
-// cut out the '--cluster-init' flag as this should only be done by the initializing master node
+// cut out the '--cluster-init' flag as this should only be done by the initializing server node
 if cmd == forbiddenCmd {
 log.Debugf("Dropping '%s' from node's cmd", forbiddenCmd)
 node.Cmd = append(node.Cmd[:i], node.Cmd[i+1:]...)
 }
 }
 for i, arg := range node.Args {
-// cut out the '--cluster-init' flag as this should only be done by the initializing master node
+// cut out the '--cluster-init' flag as this should only be done by the initializing server node
 if arg == forbiddenCmd {
 log.Debugf("Dropping '%s' from node's args", forbiddenCmd)
 node.Args = append(node.Args[:i], node.Args[i+1:]...)

@ -132,8 +132,8 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
 return err
 }

-// if it's a master node, then update the loadbalancer configuration
+// if it's a server node, then update the loadbalancer configuration
-if node.Role == k3d.MasterRole {
+if node.Role == k3d.ServerRole {
 if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
 log.Errorln("Failed to update cluster loadbalancer")
 return err

@ -231,12 +231,12 @@ func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, c
 node.Env = append(node.Env, k3d.DefaultNodeEnv...) // append default node env vars

 // specify options depending on node role
-if node.Role == k3d.WorkerRole { // TODO: check here AND in CLI or only here?
+if node.Role == k3d.AgentRole { // TODO: check here AND in CLI or only here?
-if err := patchWorkerSpec(node); err != nil {
+if err := patchAgentSpec(node); err != nil {
 return err
 }
-} else if node.Role == k3d.MasterRole {
+} else if node.Role == k3d.ServerRole {
-if err := patchMasterSpec(node); err != nil {
+if err := patchServerSpec(node); err != nil {
 return err
 }
 }

@ -264,8 +264,8 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) e
 return err
 }

-// if it's a master node, then update the loadbalancer configuration
+// if it's a server node, then update the loadbalancer configuration
-if node.Role == k3d.MasterRole {
+if node.Role == k3d.ServerRole {
 if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
 log.Errorln("Failed to update cluster loadbalancer")
 return err

@ -275,16 +275,16 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) e
 return nil
 }

-// patchWorkerSpec adds worker node specific settings to a node
+// patchAgentSpec adds agent node specific settings to a node
-func patchWorkerSpec(node *k3d.Node) error {
+func patchAgentSpec(node *k3d.Node) error {
 if node.Cmd == nil {
 node.Cmd = []string{"agent"}
 }
 return nil
 }

-// patchMasterSpec adds worker node specific settings to a node
+// patchServerSpec adds agent node specific settings to a node
-func patchMasterSpec(node *k3d.Node) error {
+func patchServerSpec(node *k3d.Node) error {

 // command / arguments
 if node.Cmd == nil {

@ -292,12 +292,12 @@ func patchMasterSpec(node *k3d.Node) error {
 }

 // Add labels and TLS SAN for the exposed API
-// FIXME: For now, the labels concerning the API on the master nodes are only being used for configuring the kubeconfig
+// FIXME: For now, the labels concerning the API on the server nodes are only being used for configuring the kubeconfig
-node.Labels[k3d.LabelMasterAPIHostIP] = node.MasterOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
+node.Labels[k3d.LabelServerAPIHostIP] = node.ServerOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
-node.Labels[k3d.LabelMasterAPIHost] = node.MasterOpts.ExposeAPI.Host
+node.Labels[k3d.LabelServerAPIHost] = node.ServerOpts.ExposeAPI.Host
-node.Labels[k3d.LabelMasterAPIPort] = node.MasterOpts.ExposeAPI.Port
+node.Labels[k3d.LabelServerAPIPort] = node.ServerOpts.ExposeAPI.Port

-node.Args = append(node.Args, "--tls-san", node.MasterOpts.ExposeAPI.Host) // add TLS SAN for non default host name
+node.Args = append(node.Args, "--tls-san", node.ServerOpts.ExposeAPI.Host) // add TLS SAN for non default host name

 return nil
 }

@ -147,15 +147,15 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
 }
 }

-// masterOpts
+// serverOpts
-masterOpts := k3d.MasterOpts{IsInit: false}
+serverOpts := k3d.ServerOpts{IsInit: false}
 for k, v := range containerDetails.Config.Labels {
-if k == k3d.LabelMasterAPIHostIP {
+if k == k3d.LabelServerAPIHostIP {
-masterOpts.ExposeAPI.HostIP = v
+serverOpts.ExposeAPI.HostIP = v
-} else if k == k3d.LabelMasterAPIHost {
+} else if k == k3d.LabelServerAPIHost {
-masterOpts.ExposeAPI.Host = v
+serverOpts.ExposeAPI.Host = v
-} else if k == k3d.LabelMasterAPIPort {
+} else if k == k3d.LabelServerAPIPort {
-masterOpts.ExposeAPI.Port = v
+serverOpts.ExposeAPI.Port = v
 }
 }

@ -187,8 +187,8 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
 Restart: restart,
 Labels: labels,
 Network: clusterNetwork,
-MasterOpts: masterOpts,
+ServerOpts: serverOpts,
-WorkerOpts: k3d.WorkerOpts{},
+AgentOpts: k3d.AgentOpts{},
 }
 return node, nil
 }

@ -37,7 +37,7 @@ func TestTranslateNodeToContainer(t *testing.T) {

 inputNode := &k3d.Node{
 Name: "test",
-Role: k3d.MasterRole,
+Role: k3d.ServerRole,
 Image: "rancher/k3s:v0.9.0",
 Volumes: []string{"/test:/tmp/test"},
 Env: []string{"TEST_KEY_1=TEST_VAL_1"},

@ -45,7 +45,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
 Args: []string{"--some-boolflag"},
 Ports: []string{"0.0.0.0:6443:6443/tcp"},
 Restart: true,
-Labels: map[string]string{k3d.LabelRole: string(k3d.MasterRole), "test_key_1": "test_val_1"},
+Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
 }

 expectedRepresentation := &NodeInDocker{

@ -54,7 +54,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
 Image: "rancher/k3s:v0.9.0",
 Env: []string{"TEST_KEY_1=TEST_VAL_1"},
 Cmd: []string{"server", "--https-listen-port=6443", "--some-boolflag"},
-Labels: map[string]string{k3d.LabelRole: string(k3d.MasterRole), "test_key_1": "test_val_1"},
+Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
 ExposedPorts: nat.PortSet{},
 },
 HostConfig: container.HostConfig{

@ -92,7 +92,7 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
 var imageVolume string
 var ok bool
 for _, node := range cluster.Nodes {
-if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
+if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
 if imageVolume, ok = node.Labels[k3d.LabelImageVolume]; ok {
 break
 }

@ -162,8 +162,8 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
 var importWaitgroup sync.WaitGroup
 for _, tarName := range importTarNames {
 for _, node := range cluster.Nodes {
-// only import image in master and worker nodes (i.e. ignoring auxiliary nodes like the master loadbalancer)
+// only import image in server and agent nodes (i.e. ignoring auxiliary nodes like the server loadbalancer)
-if node.Role == k3d.MasterRole || node.Role == k3d.WorkerRole {
+if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
 importWaitgroup.Add(1)
 go func(node *k3d.Node, wg *sync.WaitGroup, tarPath string) {
 log.Infof("Importing images from tarball '%s' into node '%s'...", tarPath, node.Name)

@ -47,10 +47,10 @@ const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools"
 // DefaultObjectNamePrefix defines the name prefix for every object created by k3d
 const DefaultObjectNamePrefix = "k3d"

-// ReadyLogMessageByRole defines the log messages we wait for until a master node is considered ready
+// ReadyLogMessageByRole defines the log messages we wait for until a server node is considered ready
 var ReadyLogMessageByRole = map[Role]string{
-MasterRole: "Wrote kubeconfig",
+ServerRole: "Wrote kubeconfig",
-WorkerRole: "Successfully registered node",
+AgentRole: "Successfully registered node",
 LoadBalancerRole: "start worker processes",
 }
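As a usage illustration for the renamed map: a hedged sketch of how a caller might combine ReadyLogMessageByRole with NodeWaitForLogMessage (seen earlier when waiting for the loadbalancer). The helper name waitForNodeReady is hypothetical, and the snippet is written as if it sat in the same package as NodeWaitForLogMessage, where the types package is imported under the alias k3d and logrus is available as log; it is a sketch, not code from this commit.

```go
// waitForNodeReady is a hypothetical helper: it looks up the role-specific
// ready message and blocks until the node's container logs contain it.
func waitForNodeReady(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) error {
	message, ok := k3d.ReadyLogMessageByRole[node.Role]
	if !ok {
		return fmt.Errorf("No ready log message defined for role '%s'", node.Role)
	}
	log.Debugf("Waiting for node '%s' to log '%s'", node.Name, message)
	// scan the logs starting from "now", as done for the loadbalancer above
	return NodeWaitForLogMessage(ctx, runtime, node, message, time.Now())
}
```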
@ -59,16 +59,16 @@ type Role string

 // existing k3d node roles
 const (
-MasterRole Role = "master"
+ServerRole Role = "server"
-WorkerRole Role = "worker"
+AgentRole Role = "agent"
 NoRole Role = "noRole"
 LoadBalancerRole Role = "loadbalancer"
 )

 // NodeRoles defines the roles available for nodes
 var NodeRoles = map[string]Role{
-string(MasterRole): MasterRole,
+string(ServerRole): ServerRole,
-string(WorkerRole): WorkerRole,
+string(AgentRole): AgentRole,
 string(LoadBalancerRole): LoadBalancerRole,
 }
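The NodeRoles map is what turns a plain role string (for example the value given to a `--role` flag) back into a typed Role. A minimal sketch of such a lookup, with a hypothetical helper name and assuming only the standard fmt package in addition to the declarations above:

```go
// ParseRole is a hypothetical wrapper around the NodeRoles lookup: it maps a
// string such as "server" or "agent" to its typed Role and rejects anything else.
func ParseRole(s string) (Role, error) {
	if role, ok := NodeRoles[s]; ok {
		return role, nil
	}
	return NoRole, fmt.Errorf("Unknown node role '%s'", s)
}
```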
@ -86,15 +86,15 @@ const (
 LabelNetworkExternal string = "k3d.cluster.network.external"
 LabelNetwork string = "k3d.cluster.network"
 LabelRole string = "k3d.role"
-LabelMasterAPIPort string = "k3d.master.api.port"
+LabelServerAPIPort string = "k3d.server.api.port"
-LabelMasterAPIHost string = "k3d.master.api.host"
+LabelServerAPIHost string = "k3d.server.api.host"
-LabelMasterAPIHostIP string = "k3d.master.api.hostIP"
+LabelServerAPIHostIP string = "k3d.server.api.hostIP"
 )

 // DefaultRoleCmds maps the node roles to their respective default commands
 var DefaultRoleCmds = map[Role][]string{
-MasterRole: {"server"},
+ServerRole: {"server"},
-WorkerRole: {"agent"},
+AgentRole: {"agent"},
 }

 // DefaultTmpfsMounts specifies tmpfs mounts that are required for all k3d nodes
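To make the renamed label keys concrete: per patchServerSpec further up, a server node exposing the Kubernetes API on 0.0.0.0:6550 would carry roughly the labels below on its container. The values are hypothetical; only the key names come from the constants above.

```go
package main

import "fmt"

func main() {
	// Hypothetical label set for a single server node; key names are the
	// k3d.role and k3d.server.api.* constants defined in the hunk above.
	labels := map[string]string{
		"k3d.role":              "server",
		"k3d.server.api.host":   "0.0.0.0",
		"k3d.server.api.hostIP": "0.0.0.0",
		"k3d.server.api.port":   "6550",
	}
	fmt.Println(labels)
}
```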
@ -123,15 +123,15 @@ const DefaultAPIPort = "6443"
 // DefaultAPIHost defines the default host (IP) for the Kubernetes API
 const DefaultAPIHost = "0.0.0.0"

-// DoNotCopyMasterFlags defines a list of commands/args that shouldn't be copied from an existing node when adding a similar node to a cluster
+// DoNotCopyServerFlags defines a list of commands/args that shouldn't be copied from an existing node when adding a similar node to a cluster
-var DoNotCopyMasterFlags = []string{
+var DoNotCopyServerFlags = []string{
 "--cluster-init",
 }

 // ClusterCreateOpts describe a set of options one can set when creating a cluster
 type ClusterCreateOpts struct {
 DisableImageVolume bool
-WaitForMaster bool
+WaitForServer bool
 Timeout time.Duration
 DisableLoadBalancer bool
 K3sServerArgs []string

@ -140,7 +140,7 @@ type ClusterCreateOpts struct {

 // ClusterStartOpts describe a set of options one can set when (re-)starting a cluster
 type ClusterStartOpts struct {
-WaitForMaster bool
+WaitForServer bool
 Timeout time.Duration
 }

@ -173,34 +173,34 @@ type Cluster struct {
 Network ClusterNetwork `yaml:"network" json:"network,omitempty"`
 Token string `yaml:"cluster_token" json:"clusterToken,omitempty"`
 Nodes []*Node `yaml:"nodes" json:"nodes,omitempty"`
-InitNode *Node // init master node
+InitNode *Node // init server node
 ExternalDatastore ExternalDatastore `yaml:"external_datastore" json:"externalDatastore,omitempty"`
 CreateClusterOpts *ClusterCreateOpts `yaml:"options" json:"options,omitempty"`
 ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
-MasterLoadBalancer *Node `yaml:"master_loadbalancer" json:"masterLoadBalancer,omitempty"`
+ServerLoadBalancer *Node `yaml:"server_loadbalancer" json:"serverLoadBalancer,omitempty"`
 ImageVolume string `yaml:"image_volume" json:"imageVolume,omitempty"`
 }

-// MasterCount return number of master node into cluster
+// ServerCount return number of server node into cluster
-func (c *Cluster) MasterCount() int {
+func (c *Cluster) ServerCount() int {
-masterCount := 0
+serverCount := 0
 for _, node := range c.Nodes {
-if node.Role == MasterRole {
+if node.Role == ServerRole {
-masterCount++
+serverCount++
 }
 }
-return masterCount
+return serverCount
 }

-// WorkerCount return number of worker node into cluster
+// AgentCount return number of agent node into cluster
-func (c *Cluster) WorkerCount() int {
+func (c *Cluster) AgentCount() int {
-workerCount := 0
+agentCount := 0
 for _, node := range c.Nodes {
-if node.Role == WorkerRole {
+if node.Role == AgentRole {
-workerCount++
+agentCount++
 }
 }
-return workerCount
+return agentCount
 }

 // Node describes a k3d node

@ -216,17 +216,17 @@ type Node struct {
 Restart bool `yaml:"restart" json:"restart,omitempty"`
 Labels map[string]string // filled automatically
 Network string // filled automatically
-MasterOpts MasterOpts `yaml:"master_opts" json:"masterOpts,omitempty"`
+ServerOpts ServerOpts `yaml:"server_opts" json:"serverOpts,omitempty"`
-WorkerOpts WorkerOpts `yaml:"worker_opts" json:"workerOpts,omitempty"`
+AgentOpts AgentOpts `yaml:"agent_opts" json:"agentOpts,omitempty"`
 }

-// MasterOpts describes some additional master role specific opts
+// ServerOpts describes some additional server role specific opts
-type MasterOpts struct {
+type ServerOpts struct {
-IsInit bool `yaml:"is_initializing_master" json:"isInitializingMaster,omitempty"`
+IsInit bool `yaml:"is_initializing_server" json:"isInitializingServer,omitempty"`
 ExposeAPI ExposeAPI // filled automatically
 }

-// ExternalDatastore describes an external datastore used for HA/multi-master clusters
+// ExternalDatastore describes an external datastore used for HA/multi-server clusters
 type ExternalDatastore struct {
 Endpoint string `yaml:"endpoint" json:"endpoint,omitempty"`
 CAFile string `yaml:"ca_file" json:"caFile,omitempty"`
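As a small usage sketch of the renamed types: constructing a server node with its API exposure options now reads roughly as follows. Values and the import path are assumptions for illustration (the alias k3d matches how the types package is referenced elsewhere in the diff); this is not code from the commit.

```go
package main

import (
	"fmt"

	k3d "github.com/rancher/k3d/v3/pkg/types" // import path assumed, not shown in this diff
)

func main() {
	// A hypothetical server node using the renamed Node, ServerOpts and ExposeAPI types.
	node := &k3d.Node{
		Name:  "k3d-mycluster-server-0",
		Role:  k3d.ServerRole,
		Image: "rancher/k3s:v0.9.0", // tag reused from the test fixture above
		ServerOpts: k3d.ServerOpts{
			ExposeAPI: k3d.ExposeAPI{Host: "0.0.0.0", HostIP: "0.0.0.0", Port: "6550"},
		},
	}
	fmt.Printf("%s (%s) exposes the API on %s:%s\n",
		node.Name, node.Role, node.ServerOpts.ExposeAPI.Host, node.ServerOpts.ExposeAPI.Port)
}
```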
@ -242,8 +242,8 @@ type ExposeAPI struct {
 Port string `yaml:"port" json:"port"`
 }

-// WorkerOpts describes some additional worker role specific opts
+// AgentOpts describes some additional agent role specific opts
-type WorkerOpts struct{}
+type AgentOpts struct{}

 // GetDefaultObjectName prefixes the passed name with the default prefix
 func GetDefaultObjectName(name string) string {

@ -12,7 +12,7 @@ events {

 stream {
 {{- range $port := $ports }}
-upstream master_nodes_{{ $port }} {
+upstream server_nodes_{{ $port }} {
 {{- range $server := $servers }}
 server {{ $server }}:{{ $port }} max_fails=1 fail_timeout=10s;
 {{- end }}

@ -20,7 +20,7 @@ stream {

 server {
 listen {{ $port }};
-proxy_pass master_nodes_{{ $port }};
+proxy_pass server_nodes_{{ $port }};
 proxy_timeout 600;
 proxy_connect_timeout 2s;
 }

@ -9,7 +9,7 @@ source "$CURR_DIR/common.sh"
 clustername="lifecycletest"

 info "Creating cluster $clustername..."
-$EXE cluster create "$clustername" --workers 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"
+$EXE cluster create "$clustername" --agents 1 --api-port 6443 --wait --timeout 360s || failed "could not create cluster $clustername"

 info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
 sleep 5

@ -38,9 +38,9 @@ check_clusters "$clustername" || failed "error checking cluster"
 info "Checking that we have 2 nodes online..."
 check_multi_node "$clustername" 2 || failed "failed to verify number of nodes"

-# 4. adding another worker node
+# 4. adding another agent node
-info "Adding one worker node..."
+info "Adding one agent node..."
-$EXE node create "extra-worker" --cluster "$clustername" --role "worker" --wait --timeout 360s || failed "failed to add worker node"
+$EXE node create "extra-agent" --cluster "$clustername" --role "agent" --wait --timeout 360s || failed "failed to add agent node"

 info "Checking that we have 3 nodes available now..."
 check_multi_node "$clustername" 3 || failed "failed to verify number of nodes"

@ -6,20 +6,20 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 # shellcheck source=./common.sh
 source "$CURR_DIR/common.sh"

-info "Creating cluster multimaster..."
+info "Creating cluster multiserver..."
-$EXE cluster create "multimaster" --masters 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multimaster"
+$EXE cluster create "multiserver" --servers 3 --api-port 6443 --wait --timeout 360s || failed "could not create cluster multiserver"

 info "Checking that we have access to the cluster..."
-check_clusters "multimaster" || failed "error checking cluster"
+check_clusters "multiserver" || failed "error checking cluster"

 info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
 sleep 5

-info "Checking that we have 3 master nodes online..."
+info "Checking that we have 3 server nodes online..."
-check_multi_node "multimaster" 3 || failed "failed to verify number of nodes"
+check_multi_node "multiserver" 3 || failed "failed to verify number of nodes"

-info "Deleting cluster multimaster..."
+info "Deleting cluster multiserver..."
-$EXE cluster delete "multimaster" || failed "could not delete the cluster multimaster"
+$EXE cluster delete "multiserver" || failed "could not delete the cluster multiserver"

 exit 0

thoughts.md (24 lines changed)

@ -36,10 +36,10 @@ Here's how k3d types should translate to a runtime type:

 ## Node Configuration

-- master node(s)
+- server node(s)
 - ENV
 - `K3S_CLUSTER_INIT`
-- if num_masters > 1 && no external datastore configured
+- if num_servers > 1 && no external datastore configured
 - `K3S_KUBECONFIG_OUTPUT`
 - k3d default -> `/output/kubeconfig.yaml`
 - CMD/ARGS

@ -65,9 +65,9 @@ Here's how k3d types should translate to a runtime type:
 - `privileged`
 - Network
 - cluster network or external/inherited
-- worker nodes
+- agent nodes
 - ENV
-- `K3S_URL` to connect to master node
+- `K3S_URL` to connect to server node
 - server hostname + port (6443)
 - cluster-specific or inherited
 - CMD/ARGS

@ -81,23 +81,23 @@ Here's how k3d types should translate to a runtime type:

 - `--port [host:]port[:containerPort][/protocol][@group_identifier[[index] | @node_identifier]`
 - Examples:
-- `--port 0.0.0.0:8080:8081/tcp@workers` -> whole group
+- `--port 0.0.0.0:8080:8081/tcp@agents` -> whole group
-- `--port 80@workers[0]` -> single instance of group by list index
+- `--port 80@agents[0]` -> single instance of group by list index
-- `--port 80@workers[0,2-3]` -> multiple instances of a group by index lists and ranges
+- `--port 80@agents[0,2-3]` -> multiple instances of a group by index lists and ranges
-- `--port 80@k3d-test-worker-0` -> single instance by specific node identifier
+- `--port 80@k3d-test-agent-0` -> single instance by specific node identifier
-- `--port 80@k3d-test-master-0@workers[1-5]` -> multiple instances by combination of node and group identifiers
+- `--port 80@k3d-test-server-0@agents[1-5]` -> multiple instances by combination of node and group identifiers

 - analogous for volumes

-## [WIP] Multi-Master Setup
+## [WIP] Multi-Server Setup

-- to make this possible, we always deploy a load-balancer (nginx) in front of the master nodes as an extra container
+- to make this possible, we always deploy a load-balancer (nginx) in front of the server nodes as an extra container
 - consider that in the kubeconfig file and `--tls-san`

 ### Variants

 - [x] embedded datastore (dqlite)
-- if `--masters` > 1 deploy a load-balancer in front of them as an extra container
+- if `--servers` > 1 deploy a load-balancer in front of them as an extra container
 - [ ] external datastore

 ## [DONE] Keep State in Docker Labels