Andrey Smirnov 0081ac5fac refactor: extract Talos cluster provisioner as common code
This extracts the Docker Talos cluster provisioner as common code
which might be shared between `osctl cluster` and integration-test.

There should be almost no functional changes.

As a proof of concept, abstract cluster readiness checks were implemented
based on the provisioned cluster state. They implement the same checks as
`basic-integration.sh` in pure Go via the Talos/K8s clients.

The `conditions` package was promoted from machined-internal to
`internal/pkg`, as it is used to run the checks.

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
2019-12-27 12:14:19 -08:00

62 lines
2.7 KiB
Go

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package check

import (
	"context"
	"time"

	"github.com/talos-systems/talos/internal/pkg/conditions"
	"github.com/talos-systems/talos/internal/pkg/provision"
)

// DefaultClusterChecks returns a set of default Talos cluster readiness checks.
func DefaultClusterChecks() []ClusterCheck {
	return []ClusterCheck{
		// wait for bootkube to finish on init node
		func(cluster provision.ClusterAccess) conditions.Condition {
			return conditions.PollingCondition("bootkube to finish", func(ctx context.Context) error {
				return ServiceStateAssertion(ctx, cluster, "bootkube", "Finished")
			}, 5*time.Minute, 5*time.Second)
		},
		// wait for apid to be ready on all the nodes
		func(cluster provision.ClusterAccess) conditions.Condition {
			return conditions.PollingCondition("apid to be ready", func(ctx context.Context) error {
				return ApidReadyAssertion(ctx, cluster)
			}, 2*time.Minute, 5*time.Second)
		},
		// wait for all the nodes to report in at k8s level
		func(cluster provision.ClusterAccess) conditions.Condition {
			return conditions.PollingCondition("all k8s nodes to report", func(ctx context.Context) error {
				return K8sAllNodesReportedAssertion(ctx, cluster)
			}, 5*time.Minute, 5*time.Second)
		},
		// wait for all the nodes to report ready at k8s level
		func(cluster provision.ClusterAccess) conditions.Condition {
			return conditions.PollingCondition("all k8s nodes to report ready", func(ctx context.Context) error {
				return K8sAllNodesReadyAssertion(ctx, cluster)
			}, 10*time.Minute, 5*time.Second)
		},
		// wait for HA k8s control plane
		func(cluster provision.ClusterAccess) conditions.Condition {
			return conditions.PollingCondition("all master nodes to be part of k8s control plane", func(ctx context.Context) error {
				return K8sFullControlPlaneAssertion(ctx, cluster)
			}, 2*time.Minute, 5*time.Second)
		},
		// wait for kube-proxy to report ready
		func(cluster provision.ClusterAccess) conditions.Condition {
			return conditions.PollingCondition("kube-proxy to report ready", func(ctx context.Context) error {
				return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
			}, 3*time.Minute, 5*time.Second)
		},
		// wait for kube-dns to report ready
		func(cluster provision.ClusterAccess) conditions.Condition {
			return conditions.PollingCondition("kube-dns to report ready", func(ctx context.Context) error {
				return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-dns")
			}, 3*time.Minute, 5*time.Second)
		},
	}
}
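
For reference, a minimal sketch of how a caller such as `osctl cluster create` or the integration test might drive these checks after provisioning. The `Wait` helper below is hypothetical (the actual runner in the Talos tree may be shaped differently), and it assumes `conditions.Condition` exposes a `Wait(ctx context.Context) error` method:

// runchecks.go (hypothetical sketch): drive the readiness checks sequentially.
package check

import (
	"context"
	"fmt"

	"github.com/talos-systems/talos/internal/pkg/provision"
)

// Wait instantiates each check against the provisioned cluster and blocks
// until it passes or fails; the overall deadline is controlled by ctx.
// Hypothetical helper; assumes conditions.Condition has Wait(ctx) error.
func Wait(ctx context.Context, cluster provision.ClusterAccess, checks []ClusterCheck) error {
	for i, check := range checks {
		condition := check(cluster)

		if err := condition.Wait(ctx); err != nil {
			return fmt.Errorf("check %d of %d failed: %w", i+1, len(checks), err)
		}
	}

	return nil
}

A caller would then invoke something like check.Wait(ctx, cluster, check.DefaultClusterChecks()) once the cluster has been provisioned.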