Brad Beam d8249c8779
refactor(init): Allow kubeadm init on controlplane (#658)
* refactor(init): Allow kubeadm init on controlplane

This shifts the cluster formation from init(bootstrap) and join(control plane)
to init(control plane).

This makes use of the previously implemented initToken to provide a TTL for
cluster initialization to take place and allows us to mostly treat all control
plane nodes equally. This also sets up the path for us to handle master upgrades
and not be concerned with odd behavior when upgrading the previously defined
init node.

To facilitate kubeadm init across all control plane nodes, we make use of the
initToken to run `kubeadm init phase certs` command to generate any missing
certificates once. All other control plane nodes will attempt to sync the
necessary certs/files via all defined trustd endpoints and begin the startup
process.

* feat(init): Add service runner context to PreFunc

Signed-off-by: Brad Beam <brad.beam@talos-systems.com>
2019-05-24 16:05:49 -05:00

87 lines
2.7 KiB
Go

/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Package generate produces Talos userdata (machine configuration) from
// Go text/template definitions.
package generate

// initTempl is the userdata template for a control plane node performing
// `kubeadm init`. It renders:
//   - the OS and Kubernetes CA cert/key pairs (security section),
//   - the init service config, including the cluster-formation initToken
//     and kubeadm certificateKey, plus the embedded kubeadm manifests
//     (InitConfiguration, ClusterConfiguration, KubeletConfiguration,
//     KubeProxyConfiguration),
//   - the trustd token, endpoints, and per-node cert SANs.
//
// Template inputs referenced here: .Certs.{OsCert,OsKey,K8sCert,K8sKey},
// .InitToken, .KubeadmTokens.{CertKey,BootstrapToken}, .ClusterName,
// .KubernetesVersion, .IP, .MasterIPs, .Index, .ServiceDomain,
// .PodNet, .ServiceNet, .TrustdInfo.Token, .Endpoints.
//
// NOTE(review): the etcd initial-cluster line uses an `add` function,
// which is not a text/template builtin — assumes the executing code
// registers it via template.Funcs; confirm at the render site.
// NOTE(review): controlPlaneEndpoint uses port 443 while bindPort is
// 6443 — presumably a load balancer or port forward fronts the API
// server; verify against the deployment.
// NOTE(review): .Index appears to be the zero-based index of this node
// in .MasterIPs (it selects this node's IP for the trustd certSANs) —
// TODO confirm against the caller.
const initTempl = `---
version: ""
security:
os:
ca:
crt: "{{ .Certs.OsCert }}"
key: "{{ .Certs.OsKey }}"
kubernetes:
ca:
crt: "{{ .Certs.K8sCert }}"
key: "{{ .Certs.K8sKey }}"
services:
init:
cni: flannel
kubeadm:
initToken: {{ .InitToken }}
certificateKey: '{{ .KubeadmTokens.CertKey }}'
configuration: |
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
bootstrapTokens:
- token: '{{ .KubeadmTokens.BootstrapToken }}'
ttl: 0s
localAPIEndpoint:
bindPort: 6443
nodeRegistration:
criSocket: /run/containerd/containerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
clusterName: {{ .ClusterName }}
kubernetesVersion: {{ .KubernetesVersion }}
controlPlaneEndpoint: {{ .IP }}:443
apiServer:
certSANs: [ {{ range $i,$ip := .MasterIPs }}{{if $i}},{{end}}"{{$ip}}"{{end}}, "127.0.0.1" ]
extraArgs:
runtime-config: settings.k8s.io/v1alpha1=true
feature-gates: ExperimentalCriticalPodAnnotation=true
controllerManager:
extraArgs:
terminated-pod-gc-threshold: '100'
feature-gates: ExperimentalCriticalPodAnnotation=true
scheduler:
extraArgs:
feature-gates: ExperimentalCriticalPodAnnotation=true
networking:
dnsDomain: {{ .ServiceDomain }}
podSubnet: {{ index .PodNet 0 }}
serviceSubnet: {{ index .ServiceNet 0 }}
etcd:
local:
serverCertSANs:
- master-{{ .Index }}
- {{ .IP }}
peerCertSANs:
- master-{{ .Index }}
- {{ .IP }}
extraArgs:
initial-cluster: {{ range $i,$ip := .MasterIPs }}{{if $i}},{{end}}master-{{add $i 1}}=https://{{$ip}}:2380{{end}}
initial-cluster-state: new
listen-peer-urls: https://{{ .IP }}:2380
listen-client-urls: https://{{ .IP }}:2379
advertise-client-urls: https://{{ .IP }}:2379
initial-advertise-peer-urls: https://{{ .IP }}:2380
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
ExperimentalCriticalPodAnnotation: true
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
scheduler: lc
trustd:
token: '{{ .TrustdInfo.Token }}'
endpoints: [ {{ .Endpoints }} ]
certSANs: [ "{{ index .MasterIPs .Index }}", "127.0.0.1" ]
`