mirror of
https://github.com/siderolabs/talos.git
synced 2025-08-18 12:37:05 +02:00
This change is only moving packages and updating import paths. Goal: expose `internal/pkg/provision` as `pkg/provision` to enable other projects to import Talos provisioning library. As cluster checks are almost always required as part of provisioning process, package `internal/pkg/cluster` was also made public as `pkg/cluster`. Other changes were direct dependencies discovered by `importvet` which were updated. Public packages (useful, general purpose packages with stable API): * `internal/pkg/conditions` -> `pkg/conditions` * `internal/pkg/tail` -> `pkg/tail` Private packages (used only on provisioning library internally): * `internal/pkg/inmemhttp` -> `pkg/provision/internal/inmemhttp` * `internal/pkg/kernel/vmlinuz` -> `pkg/provision/internal/vmlinuz` * `internal/pkg/cniutils` -> `pkg/provision/internal/cniutils` Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
84 lines
1.8 KiB
Go
84 lines
1.8 KiB
Go
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
|
|
// Package check provides set of checks to verify cluster readiness.
|
|
package check
|
|
|
|
import (
|
|
"context"
|
|
"time"
|
|
|
|
"github.com/talos-systems/talos/pkg/cluster"
|
|
"github.com/talos-systems/talos/pkg/conditions"
|
|
)
|
|
|
|
// updateInterval is how often the reporter is refreshed with the current
// condition state while a check is still in progress.
const updateInterval = 100 * time.Millisecond
|
|
|
|
// ClusterInfo is the interface required by checks.
//
// It combines the client, Kubernetes and info providers from pkg/cluster.
type ClusterInfo interface {
	cluster.ClientProvider
	cluster.K8sProvider
	cluster.Info
}
|
|
|
|
// ClusterCheck implements a function which returns a condition based on ClusterInfo.
type ClusterCheck func(ClusterInfo) conditions.Condition
|
|
|
|
// Reporter presents wait progress.
//
// It is supposed that reporter drops duplicate messages.
type Reporter interface {
	// Update is called with the current state of the condition being waited
	// on; it may be invoked repeatedly with an unchanged state.
	Update(check conditions.Condition)
}
|
|
|
|
// Wait run the checks against the cluster and waits for the full set to succeed.
|
|
//
|
|
// Context ctx might have a timeout set to limit overall wait time.
|
|
// Each check might define its own timeout.
|
|
func Wait(ctx context.Context, cluster ClusterInfo, checks []ClusterCheck, reporter Reporter) error {
|
|
for _, check := range checks {
|
|
select {
|
|
case <-ctx.Done():
|
|
return ctx.Err()
|
|
default:
|
|
}
|
|
|
|
condition := check(cluster)
|
|
|
|
errCh := make(chan error, 1)
|
|
|
|
go func(condition conditions.Condition) {
|
|
errCh <- condition.Wait(ctx)
|
|
}(condition)
|
|
|
|
var err error
|
|
|
|
func() {
|
|
ticker := time.NewTicker(updateInterval)
|
|
defer ticker.Stop()
|
|
|
|
// report initial state
|
|
reporter.Update(condition)
|
|
|
|
// report last state
|
|
defer reporter.Update(condition)
|
|
|
|
for {
|
|
select {
|
|
case err = <-errCh:
|
|
return
|
|
case <-ticker.C:
|
|
reporter.Update(condition)
|
|
}
|
|
}
|
|
}()
|
|
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|