mirror of
https://github.com/siderolabs/talos.git
synced 2025-08-12 09:37:05 +02:00
This enables golangci-lint via build tags for integration tests (this should have been done long ago!), and fixes the linting errors. Two tests were updated to reduce flakiness: * apply config: wait for nodes to issue "boot done" sequence event before proceeding * recover: kill pods even if they appear after the initial set gets killed (potential race condition with previous test). Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
94 lines · 2.5 KiB · Go
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
|
|
// +build integration_cli
|
|
|
|
package cli
|
|
|
|
import (
|
|
"regexp"
|
|
"strings"
|
|
|
|
"github.com/talos-systems/talos/internal/integration/base"
|
|
"github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1/machine"
|
|
)
|
|
|
|
// HealthSuite verifies health command.
|
|
type HealthSuite struct {
|
|
base.CLISuite
|
|
}
|
|
|
|
// SuiteName ...
|
|
func (suite *HealthSuite) SuiteName() string {
|
|
return "cli.HealthSuite"
|
|
}
|
|
|
|
// TestClientSide does successful health check run from client-side.
|
|
//
|
|
//nolint: gocyclo
|
|
func (suite *HealthSuite) TestClientSide() {
|
|
if suite.Cluster == nil {
|
|
suite.T().Skip("Cluster is not available, skipping test")
|
|
}
|
|
|
|
args := []string{"--server=false"}
|
|
|
|
bootstrapAPIIsUsed := true
|
|
|
|
for _, node := range suite.Cluster.Info().Nodes {
|
|
if node.Type == machine.TypeInit {
|
|
bootstrapAPIIsUsed = false
|
|
}
|
|
}
|
|
|
|
if bootstrapAPIIsUsed {
|
|
for _, node := range suite.Cluster.Info().Nodes {
|
|
switch node.Type {
|
|
case machine.TypeControlPlane:
|
|
args = append(args, "--control-plane-nodes", node.PrivateIP.String())
|
|
case machine.TypeJoin:
|
|
args = append(args, "--worker-nodes", node.PrivateIP.String())
|
|
case machine.TypeInit, machine.TypeUnknown:
|
|
panic("unexpected")
|
|
}
|
|
}
|
|
} else {
|
|
for _, node := range suite.Cluster.Info().Nodes {
|
|
switch node.Type {
|
|
case machine.TypeInit:
|
|
args = append(args, "--init-node", node.PrivateIP.String())
|
|
case machine.TypeControlPlane:
|
|
args = append(args, "--control-plane-nodes", node.PrivateIP.String())
|
|
case machine.TypeJoin:
|
|
args = append(args, "--worker-nodes", node.PrivateIP.String())
|
|
case machine.TypeUnknown:
|
|
panic("unexpected")
|
|
}
|
|
}
|
|
}
|
|
|
|
if suite.K8sEndpoint != "" {
|
|
args = append(args, "--k8s-endpoint", strings.Split(suite.K8sEndpoint, ":")[0])
|
|
}
|
|
|
|
suite.RunCLI(append([]string{"health"}, args...),
|
|
base.StderrNotEmpty(),
|
|
base.StdoutEmpty(),
|
|
base.StderrShouldMatch(regexp.MustCompile(`waiting for all k8s nodes to report ready`)),
|
|
)
|
|
}
|
|
|
|
// TestServerSide does successful health check run from server-side.
|
|
func (suite *HealthSuite) TestServerSide() {
|
|
suite.RunCLI([]string{"health", "--nodes", suite.RandomDiscoveredNode(machine.TypeControlPlane)},
|
|
base.StderrNotEmpty(),
|
|
base.StdoutEmpty(),
|
|
base.StderrShouldMatch(regexp.MustCompile(`waiting for all k8s nodes to report ready`)),
|
|
)
|
|
}
|
|
|
|
func init() {
|
|
allSuites = append(allSuites, new(HealthSuite))
|
|
}
|