// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

// Package check provides a set of checks to verify cluster readiness.
package check

import (
	"context"
	"net/netip"
	"time"

	"github.com/talos-systems/talos/pkg/cluster"
	"github.com/talos-systems/talos/pkg/conditions"
	"github.com/talos-systems/talos/pkg/machinery/generic/slices"
)

const updateInterval = 100 * time.Millisecond

// ClusterInfo is the interface required by checks.
type ClusterInfo interface {
	cluster.ClientProvider
	cluster.K8sProvider
	cluster.Info
}

// ClusterCheck is a function which returns a condition based on ClusterInfo.
type ClusterCheck func(ClusterInfo) conditions.Condition

// Reporter presents wait progress.
//
// It is assumed that the reporter drops duplicate messages.
type Reporter interface {
	Update(check conditions.Condition)
}
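
// A minimal Reporter might simply print every update. The sketch below is
// illustrative and not part of the original file; the stderrReporter type is
// hypothetical, and it assumes conditions.Condition implements fmt.Stringer:
//
//	type stderrReporter struct{}
//
//	func (stderrReporter) Update(check conditions.Condition) {
//		fmt.Fprintln(os.Stderr, check.String())
//	}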

// Wait runs the checks against the cluster and waits for the full set to succeed.
//
// The context ctx might have a timeout set to limit the overall wait time.
// Each check might define its own timeout.
func Wait(ctx context.Context, cluster ClusterInfo, checks []ClusterCheck, reporter Reporter) error {
	for _, check := range checks {
		// abort early if the overall context is already done
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		condition := check(cluster)

		errCh := make(chan error, 1)

		// wait for the condition in the background
		go func(condition conditions.Condition) {
			errCh <- condition.Wait(ctx)
		}(condition)

		var err error

		// poll the condition and report progress until the wait finishes
		func() {
			ticker := time.NewTicker(updateInterval)
			defer ticker.Stop()

			// report initial state
			reporter.Update(condition)

			// report last state
			defer reporter.Update(condition)

			for {
				select {
				case err = <-errCh:
					return
				case <-ticker.C:
					reporter.Update(condition)
				}
			}
		}()

		if err != nil {
			return err
		}
	}

	return nil
}
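
// waitExample is an illustrative sketch and not part of the original file: it
// shows how a caller might assemble a single ClusterCheck and run Wait. It
// assumes conditions.PollingCondition, which polls an assertion until it
// succeeds or the given timeout expires; the assertion here is a trivial
// placeholder, and the timeout values are arbitrary.
func waitExample(ctx context.Context, clusterInfo ClusterInfo, reporter Reporter) error {
	checks := []ClusterCheck{
		func(info ClusterInfo) conditions.Condition {
			return conditions.PollingCondition("example assertion", func(ctx context.Context) error {
				// a real check would assert some aspect of cluster state here
				return nil
			}, time.Minute, updateInterval)
		},
	}

	return Wait(ctx, clusterInfo, checks, reporter)
}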

// flatMapNodeInfosToIPs gathers all the IPs of the given nodes into a single list.
func flatMapNodeInfosToIPs(nodes []cluster.NodeInfo) []netip.Addr {
	return slices.FlatMap(nodes, func(node cluster.NodeInfo) []netip.Addr { return node.IPs })
}

// mapNodeInfosToInternalIPs extracts the internal IP of each of the given nodes.
func mapNodeInfosToInternalIPs(nodes []cluster.NodeInfo) []netip.Addr {
	return slices.Map(nodes, func(node cluster.NodeInfo) netip.Addr { return node.InternalIP })
}

// mapIPsToStrings converts the given IPs to their string representations.
func mapIPsToStrings(input []netip.Addr) []string {
	return slices.Map(input, func(ip netip.Addr) string { return ip.String() })
}
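
// The helpers above typically turn cluster.NodeInfo values into the address
// lists consumed by individual checks. An illustrative sketch with
// hypothetical node values:
//
//	nodes := []cluster.NodeInfo{
//		{
//			InternalIP: netip.MustParseAddr("10.5.0.2"),
//			IPs: []netip.Addr{
//				netip.MustParseAddr("10.5.0.2"),
//				netip.MustParseAddr("192.168.1.10"),
//			},
//		},
//	}
//
//	internalIPs := mapIPsToStrings(mapNodeInfosToInternalIPs(nodes)) // ["10.5.0.2"]
//	allIPs := flatMapNodeInfosToIPs(nodes)                           // both addresses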