mirror of
https://github.com/siderolabs/talos.git
synced 2025-10-05 20:51:15 +02:00
This is a rewrite of machined. It addresses some of the limitations and complexity in the implementation. This introduces the idea of a controller. A controller is responsible for managing the runtime, the sequencer, and a new state type introduced in this PR. A few highlights are: - no more event bus - functional approach to tasks (no more types defined for each task) - the task function definition now offers a lot more context, like access to raw API requests, the current sequence, a logger, the new state interface, and the runtime interface. - no more panics to handle reboots - additional initialize and reboot sequences - graceful gRPC server shutdown on critical errors - config is now stored at install time to avoid having to download it at install time and at boot time - upgrades now use the local config instead of downloading it - the upgrade API's preserve option takes precedence over the config's install force option Additionally, this pulls various packages in under machined to make the code easier to navigate. Signed-off-by: Andrew Rynhard <andrew@andrewrynhard.com>
68 lines
3.0 KiB
Go
68 lines
3.0 KiB
Go
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package check
|
|
|
|
import (
|
|
"context"
|
|
"time"
|
|
|
|
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime"
|
|
"github.com/talos-systems/talos/internal/pkg/conditions"
|
|
)
|
|
|
|
// DefaultClusterChecks returns a set of default Talos cluster readiness checks.
|
|
func DefaultClusterChecks() []ClusterCheck {
|
|
return []ClusterCheck{
|
|
// wait for etcd to be healthy on all control plane nodes
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("etcd to be healthy", func(ctx context.Context) error {
|
|
return ServiceHealthAssertion(ctx, cluster, "etcd", WithNodeTypes(runtime.MachineTypeInit, runtime.MachineTypeControlPlane))
|
|
}, 5*time.Minute, 5*time.Second)
|
|
},
|
|
// wait for bootkube to finish on init node
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("bootkube to finish", func(ctx context.Context) error {
|
|
return ServiceStateAssertion(ctx, cluster, "bootkube", "Finished", "Skipped")
|
|
}, 5*time.Minute, 5*time.Second)
|
|
},
|
|
// wait for apid to be ready on all the nodes
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("apid to be ready", func(ctx context.Context) error {
|
|
return ApidReadyAssertion(ctx, cluster)
|
|
}, 2*time.Minute, 5*time.Second)
|
|
},
|
|
// wait for all the nodes to report in at k8s level
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("all k8s nodes to report", func(ctx context.Context) error {
|
|
return K8sAllNodesReportedAssertion(ctx, cluster)
|
|
}, 5*time.Minute, 5*time.Second)
|
|
},
|
|
// wait for all the nodes to report ready at k8s level
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("all k8s nodes to report ready", func(ctx context.Context) error {
|
|
return K8sAllNodesReadyAssertion(ctx, cluster)
|
|
}, 10*time.Minute, 5*time.Second)
|
|
},
|
|
// wait for HA k8s control plane
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("all master nodes to be part of k8s control plane", func(ctx context.Context) error {
|
|
return K8sFullControlPlaneAssertion(ctx, cluster)
|
|
}, 2*time.Minute, 5*time.Second)
|
|
},
|
|
// wait for kube-proxy to report ready
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("kube-proxy to report ready", func(ctx context.Context) error {
|
|
return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
|
|
}, 3*time.Minute, 5*time.Second)
|
|
},
|
|
// wait for coredns to report ready
|
|
func(cluster ClusterInfo) conditions.Condition {
|
|
return conditions.PollingCondition("coredns to report ready", func(ctx context.Context) error {
|
|
return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-dns")
|
|
}, 3*time.Minute, 5*time.Second)
|
|
},
|
|
}
|
|
}
|