talos/internal/pkg/cluster/check/kubernetes.go
Andrew Rynhard 49307d554d refactor: improve machined
This is a rewrite of machined. It addresses some of the limitations and
complexity in the previous implementation, and introduces the idea of a
controller. A controller is responsible for managing the runtime, the
sequencer, and a new state type introduced in this PR.
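
In sketch form, the controller looks something like this (illustrative
only; the real definitions live under machined):

    type Controller interface {
        Runtime() Runtime
        Sequencer() Sequencer
        Run(seq Sequence, data interface{}) error
    }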

A few highlights are:

- no more event bus
- functional approach to tasks (no more types defined for each task)
  - the task function definition now offers a lot more context, like
    access to raw API requests, the current sequence, a logger, the new
    state interface, and the runtime interface (see the sketch after
    this list)
- no more panics to handle reboots
- additional initialize and reboot sequences
- graceful gRPC server shutdown on critical errors
- config is now stored at install time, so it no longer needs to be
  downloaded both at install time and at boot time
- upgrades now use the local config instead of downloading it
- the upgrade API's preserve option takes precedence over the config's
  install force option
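
As a sketch, a task is now just a function (the signature here is
illustrative, not the exact definition):

    // The runtime argument is what carries the extra context: the state
    // interface, the current sequence, and the raw API request.
    type TaskExecutionFunc func(ctx context.Context, logger *log.Logger, r Runtime) error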

Additionally, this pulls various packages in under machined to make the
code easier to navigate.

Signed-off-by: Andrew Rynhard <andrew@andrewrynhard.com>
2020-04-28 08:20:55 -07:00

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

// Package check provides a set of checks to verify cluster readiness.
package check

import (
	"context"
	"fmt"
	"reflect"
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/talos-systems/talos/internal/app/machined/pkg/runtime"
	"github.com/talos-systems/talos/internal/pkg/cluster"
)
// K8sAllNodesReportedAssertion checks whether all the expected nodes show up in the node list.
func K8sAllNodesReportedAssertion(ctx context.Context, cluster ClusterInfo) error {
	clientset, err := cluster.K8sClient(ctx)
	if err != nil {
		return err
	}

	expectedNodes := cluster.Nodes()

	nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	var actualNodes []string

	// Collect the internal IP addresses reported by each registered node.
	for _, node := range nodes.Items {
		for _, nodeAddress := range node.Status.Addresses {
			if nodeAddress.Type == v1.NodeInternalIP {
				actualNodes = append(actualNodes, nodeAddress.Address)
			}
		}
	}

	// Compare the two sets order-insensitively.
	sort.Strings(expectedNodes)
	sort.Strings(actualNodes)

	if reflect.DeepEqual(expectedNodes, actualNodes) {
		return nil
	}

	return fmt.Errorf("expected %v nodes, but got %v nodes", expectedNodes, actualNodes)
}
// K8sFullControlPlaneAssertion checks whether all the init and control plane nodes are registered as Kubernetes master nodes.
//
//nolint: gocyclo
func K8sFullControlPlaneAssertion(ctx context.Context, cluster ClusterInfo) error {
	clientset, err := cluster.K8sClient(ctx)
	if err != nil {
		return err
	}

	expectedNodes := append(cluster.NodesByType(runtime.MachineTypeInit), cluster.NodesByType(runtime.MachineTypeControlPlane)...)

	nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	var actualNodes []string

	// A node counts as part of the control plane if it carries the master
	// role label; record the internal IP of each such node.
	for _, node := range nodes.Items {
		for label := range node.Labels {
			if label == "node-role.kubernetes.io/master" {
				for _, nodeAddress := range node.Status.Addresses {
					if nodeAddress.Type == v1.NodeInternalIP {
						actualNodes = append(actualNodes, nodeAddress.Address)

						break
					}
				}

				break
			}
		}
	}

	sort.Strings(expectedNodes)
	sort.Strings(actualNodes)

	if reflect.DeepEqual(expectedNodes, actualNodes) {
		return nil
	}

	return fmt.Errorf("expected %v nodes, but got %v nodes", expectedNodes, actualNodes)
}
// K8sAllNodesReadyAssertion checks whether all the nodes are Ready.
func K8sAllNodesReadyAssertion(ctx context.Context, cluster cluster.K8sProvider) error {
	clientset, err := cluster.K8sClient(ctx)
	if err != nil {
		return err
	}

	nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	var notReadyNodes []string

	for _, node := range nodes.Items {
		for _, cond := range node.Status.Conditions {
			if cond.Type == v1.NodeReady {
				if cond.Status != v1.ConditionTrue {
					notReadyNodes = append(notReadyNodes, node.Name)

					break
				}
			}
		}
	}

	if len(notReadyNodes) == 0 {
		return nil
	}

	return fmt.Errorf("some nodes are not ready: %v", notReadyNodes)
}
// K8sPodReadyAssertion checks whether all the pods matching the label selector are Ready.
func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) error {
	clientset, err := cluster.K8sClient(ctx)
	if err != nil {
		return err
	}

	pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: labelSelector,
	})
	if err != nil {
		return err
	}

	if len(pods.Items) == 0 {
		return fmt.Errorf("no pods found for namespace %q and label %q", namespace, labelSelector)
	}

	var notReadyPods []string

	for _, pod := range pods.Items {
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodReady {
				if cond.Status != v1.ConditionTrue {
					notReadyPods = append(notReadyPods, pod.Name)

					break
				}
			}
		}
	}

	if len(notReadyPods) == 0 {
		return nil
	}

	return fmt.Errorf("some pods are not ready: %v", notReadyPods)
}
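
// exampleClusterReady is an illustrative sketch, not part of the original
// file: each assertion above is a one-shot, point-in-time check, so callers
// run them in order and retry the whole set until the cluster converges.
// The namespace and label selector below are examples only, and ClusterInfo
// is assumed to satisfy the narrower cluster.K8sProvider interface.
func exampleClusterReady(ctx context.Context, info ClusterInfo) error {
	for _, assertion := range []func(context.Context, ClusterInfo) error{
		K8sAllNodesReportedAssertion,
		K8sFullControlPlaneAssertion,
	} {
		if err := assertion(ctx, info); err != nil {
			return err
		}
	}

	if err := K8sAllNodesReadyAssertion(ctx, info); err != nil {
		return err
	}

	return K8sPodReadyAssertion(ctx, info, "kube-system", "k8s-app=kube-proxy")
}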