test: default to using the bootstrap API

This moves our test scripts to using the bootstrap API. Some
automation around invoking the bootstrap API was also added so
that creating clusters with the CLI remains just as easy as
before.

Signed-off-by: Andrew Rynhard <andrew@andrewrynhard.com>
Authored by Andrew Rynhard on 2020-06-13 23:43:22 +00:00; committed by talos-bot
parent 8b0fd616df
commit d0d2ac3c74
33 changed files with 221 additions and 151 deletions
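
The gist of the change: the e2e scripts now pass --with-init-node=false to `talosctl cluster create`, which generates controlplane configs for every master, waits for the machine API on the lowest-sorted control plane node, and then calls the bootstrap API against it (see the create.go hunks below). A minimal, self-contained sketch of that wait-then-bootstrap pattern, assuming an already-constructed client; the package name and the BootstrapNode helper are illustrative only and not part of this commit:

package bootstrap

import (
	"context"
	"time"

	"github.com/talos-systems/talos/pkg/client"
	"github.com/talos-systems/talos/pkg/retry"
)

// BootstrapNode waits for the Talos API to come up on the given node and then
// invokes the bootstrap API against it, mirroring the flow added to
// `talosctl cluster create` in this commit.
func BootstrapNode(ctx context.Context, c *client.Client, node string) error {
	nodeCtx := client.WithNodes(ctx, node)

	// Poll Version until the API answers; errors are treated as retryable
	// for up to five minutes.
	err := retry.Constant(5*time.Minute, retry.WithUnits(500*time.Millisecond)).Retry(func() error {
		retryCtx, cancel := context.WithTimeout(nodeCtx, 500*time.Millisecond)
		defer cancel()

		if _, err := c.Version(retryCtx); err != nil {
			return retry.ExpectedError(err)
		}

		return nil
	})
	if err != nil {
		return err
	}

	// The API is up; ask this node to bootstrap the control plane.
	bootstrapCtx, cancel := context.WithTimeout(nodeCtx, 30*time.Second)
	defer cancel()

	return c.Bootstrap(bootstrapCtx)
}

The same node-selection rule (init node if present, otherwise the first sorted control plane node) is applied to the cluster health checks further down in the diff.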


@ -10,7 +10,7 @@ ARTIFACTS := _out
TOOLS ?= autonomy/tools:v0.2.0-1-g418e800
GO_VERSION ?= 1.14
OPERATING_SYSTEM := $(shell uname -s | tr "[:upper:]" "[:lower:]")
OSCTL_DEFAULT_TARGET := talosctl-$(OPERATING_SYSTEM)
TALOSCTL_DEFAULT_TARGET := talosctl-$(OPERATING_SYSTEM)
INTEGRATION_TEST_DEFAULT_TARGET := integration-test-$(OPERATING_SYSTEM)
INTEGRATION_TEST_PROVISION_DEFAULT_TARGET := integration-test-provision-$(OPERATING_SYSTEM)
KUBECTL_URL ?= https://storage.googleapis.com/kubernetes-release/release/v1.19.0-beta.1/bin/$(OPERATING_SYSTEM)/amd64/kubectl
@ -136,7 +136,7 @@ talos: ## Builds the Talos container image and outputs it to the artifact direct
talosctl-%:
@$(MAKE) local-$@ DEST=$(ARTIFACTS)
talosctl: $(OSCTL_DEFAULT_TARGET) ## Builds the talosctl binary for the local machine.
talosctl: $(TALOSCTL_DEFAULT_TARGET) ## Builds the talosctl binary for the local machine.
image-%: ## Builds the specified image. Valid options are aws, azure, digital-ocean, gcp, and vmware (e.g. image-aws)
@docker run --rm -v /dev:/dev -v $(PWD)/$(ARTIFACTS):/out --privileged autonomy/installer:$(TAG) image --platform $*
@ -204,7 +204,7 @@ e2e-%: $(ARTIFACTS)/$(INTEGRATION_TEST_DEFAULT_TARGET)-amd64 $(ARTIFACTS)/sonobu
SHA=$(SHA) \
IMAGE=$(REGISTRY_AND_USERNAME)/talos:$(TAG) \
ARTIFACTS=$(ARTIFACTS) \
OSCTL=$(PWD)/$(ARTIFACTS)/$(OSCTL_DEFAULT_TARGET)-amd64 \
TALOSCTL=$(PWD)/$(ARTIFACTS)/$(TALOSCTL_DEFAULT_TARGET)-amd64 \
INTEGRATION_TEST=$(PWD)/$(ARTIFACTS)/$(INTEGRATION_TEST_DEFAULT_TARGET)-amd64 \
KUBECTL=$(PWD)/$(ARTIFACTS)/kubectl \
SONOBUOY=$(PWD)/$(ARTIFACTS)/sonobuoy \
@ -216,13 +216,13 @@ provision-tests-prepare: release-artifacts $(ARTIFACTS)/$(INTEGRATION_TEST_PROVI
provision-tests: provision-tests-prepare
@$(MAKE) hack-test-$@ \
TAG=$(TAG) \
OSCTL=$(PWD)/$(ARTIFACTS)/$(OSCTL_DEFAULT_TARGET)-amd64 \
TALOSCTL=$(PWD)/$(ARTIFACTS)/$(TALOSCTL_DEFAULT_TARGET)-amd64 \
INTEGRATION_TEST=$(PWD)/$(ARTIFACTS)/$(INTEGRATION_TEST_PROVISION_DEFAULT_TARGET)-amd64
provision-tests-track-%:
@$(MAKE) hack-test-provision-tests \
TAG=$(TAG) \
OSCTL=$(PWD)/$(ARTIFACTS)/$(OSCTL_DEFAULT_TARGET)-amd64 \
TALOSCTL=$(PWD)/$(ARTIFACTS)/$(TALOSCTL_DEFAULT_TARGET)-amd64 \
INTEGRATION_TEST=$(PWD)/$(ARTIFACTS)/$(INTEGRATION_TEST_PROVISION_DEFAULT_TARGET)-amd64 \
INTEGRATION_TEST_RUN="TestIntegration/.+-TR$*"


@ -11,6 +11,7 @@ import (
"math/big"
"net"
"os"
"sort"
"strings"
"time"
@ -23,11 +24,13 @@ import (
"github.com/talos-systems/talos/internal/pkg/provision/access"
"github.com/talos-systems/talos/internal/pkg/provision/providers"
"github.com/talos-systems/talos/pkg/cli"
"github.com/talos-systems/talos/pkg/client"
clientconfig "github.com/talos-systems/talos/pkg/client/config"
"github.com/talos-systems/talos/pkg/config"
"github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
"github.com/talos-systems/talos/pkg/constants"
talosnet "github.com/talos-systems/talos/pkg/net"
"github.com/talos-systems/talos/pkg/retry"
)
var (
@ -208,7 +211,6 @@ func create(ctx context.Context) (err error) {
}
genOptions = append(genOptions, generate.WithEndpointList(endpointList))
configBundleOpts = append(configBundleOpts,
config.WithInputOptions(
&config.InputOptions{
@ -232,6 +234,18 @@ func create(ctx context.Context) (err error) {
for i := 0; i < masters; i++ {
var cfg runtime.Configurator
nodeReq := provision.NodeRequest{
Name: fmt.Sprintf("%s-master-%d", clusterName, i+1),
IP: ips[i],
Memory: memory,
NanoCPUs: nanoCPUs,
DiskSize: diskSize,
}
if i == 0 {
nodeReq.Ports = []string{"50000:50000/tcp", "6443:6443/tcp"}
}
if withInitNode {
if i == 0 {
cfg = configBundle.Init()
@ -239,18 +253,12 @@ func create(ctx context.Context) (err error) {
cfg = configBundle.ControlPlane()
}
} else {
cfg = configBundle.ControlPlaneCfg
cfg = configBundle.ControlPlane()
}
request.Nodes = append(request.Nodes,
provision.NodeRequest{
Name: fmt.Sprintf("%s-master-%d", clusterName, i+1),
IP: ips[i],
Memory: memory,
NanoCPUs: nanoCPUs,
DiskSize: diskSize,
Config: cfg,
})
nodeReq.Config = cfg
request.Nodes = append(request.Nodes, nodeReq)
}
for i := 1; i <= workers; i++ {
@ -275,6 +283,53 @@ func create(ctx context.Context) (err error) {
return err
}
clusterAccess := access.NewAdapter(cluster, provisionOptions...)
defer clusterAccess.Close() //nolint: errcheck
if !withInitNode {
cli, err := clusterAccess.Client()
if err != nil {
return retry.UnexpectedError(err)
}
nodes := clusterAccess.NodesByType(runtime.MachineTypeControlPlane)
if len(nodes) == 0 {
return fmt.Errorf("expected at least 1 control plane node, got %d", len(nodes))
}
sort.Strings(nodes)
node := nodes[0]
nodeCtx := client.WithNodes(ctx, node)
fmt.Println("waiting for API")
err = retry.Constant(5*time.Minute, retry.WithUnits(500*time.Millisecond)).Retry(func() error {
retryCtx, cancel := context.WithTimeout(nodeCtx, 500*time.Millisecond)
defer cancel()
if _, err = cli.Version(retryCtx); err != nil {
return retry.ExpectedError(err)
}
return nil
})
if err != nil {
return err
}
fmt.Println("bootstrapping cluster")
bootstrapCtx, cancel := context.WithTimeout(nodeCtx, 30*time.Second)
defer cancel()
if err = cli.Bootstrap(bootstrapCtx); err != nil {
return err
}
}
if !clusterWait {
return nil
}
@ -283,9 +338,6 @@ func create(ctx context.Context) (err error) {
checkCtx, checkCtxCancel := context.WithTimeout(ctx, clusterWaitTimeout)
defer checkCtxCancel()
clusterAccess := access.NewAdapter(cluster, provisionOptions...)
defer clusterAccess.Close() //nolint: errcheck
return check.Wait(checkCtx, clusterAccess, check.DefaultClusterChecks(), check.StderrReporter())
}


@ -17,7 +17,7 @@ PROVISIONER=docker
CLUSTER_NAME=e2e-${PROVISIONER}
function create_cluster {
"${OSCTL}" cluster create \
"${TALOSCTL}" cluster create \
--provisioner "${PROVISIONER}" \
--name "${CLUSTER_NAME}" \
--image "${IMAGE}" \
@ -25,7 +25,8 @@ function create_cluster {
--mtu 1500 \
--memory 2048 \
--cpus 4.0 \
--endpoint "${ENDPOINT}"
--endpoint "${ENDPOINT}" \
--with-init-node=false
}
create_cluster


@ -20,7 +20,7 @@ case "${REGISTRY:-false}" in
esac
function create_cluster {
"${OSCTL}" cluster create \
"${TALOSCTL}" cluster create \
--provisioner "${PROVISIONER}" \
--name "${CLUSTER_NAME}" \
--masters=3 \
@ -29,6 +29,7 @@ function create_cluster {
--cpus 2.0 \
--cidr 172.20.0.0/24 \
--install-image ${REGISTRY:-docker.io}/autonomy/installer:${INSTALLER_TAG} \
--with-init-node=false \
${FIRECRACKER_FLAGS}
}


@ -5,7 +5,7 @@
# - TAG
# - SHA
# - ARTIFACTS
# - OSCTL
# - TALOSCTL
# - INTEGRATION_TEST
# - KUBECTL
# - SONOBUOY
@ -59,7 +59,7 @@ function create_cluster_capi {
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
${OSCTL} config endpoint "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -o go-template --template='{{range .status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' ${NAME_PREFIX}-controlplane-0)"
${TALOSCTL} config endpoint "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -o go-template --template='{{range .status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' ${NAME_PREFIX}-controlplane-0)"
# Wait for the kubeconfig from capi master-0
timeout=$(($(date +%s) + ${TIMEOUT}))
@ -94,11 +94,11 @@ function create_cluster_capi {
}
function run_talos_integration_test {
"${INTEGRATION_TEST}" -test.v -talos.failfast -talos.talosctlpath "${OSCTL}" -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}"
"${INTEGRATION_TEST}" -test.v -talos.failfast -talos.talosctlpath "${TALOSCTL}" -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}"
}
function run_talos_integration_test_docker {
"${INTEGRATION_TEST}" -test.v -talos.talosctlpath "${OSCTL}" -talos.k8sendpoint ${ENDPOINT}:6443 -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}"
"${INTEGRATION_TEST}" -test.v -talos.talosctlpath "${TALOSCTL}" -talos.k8sendpoint ${ENDPOINT}:6443 -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}"
}
function run_kubernetes_integration_test {
@ -112,7 +112,7 @@ function run_kubernetes_integration_test {
[[ $(date +%s) -gt $timeout ]] && exit 1
echo "attempting to run sonobuoy"
sleep 10
done
done
${SONOBUOY} status --kubeconfig ${KUBECONFIG} --json | jq . | tee ${TMP}/sonobuoy-status.json
if [ $(cat ${TMP}/sonobuoy-status.json | jq -r '.plugins[] | select(.plugin == "e2e") | ."result-status"') != 'passed' ]; then exit 1; fi
}
@ -130,12 +130,12 @@ function run_worker_cis_benchmark {
}
function get_kubeconfig {
"${OSCTL}" kubeconfig "${TMP}"
"${TALOSCTL}" kubeconfig "${TMP}"
}
function dump_cluster_state {
nodes=$(${KUBECTL} get nodes -o jsonpath="{.items[*].status.addresses[?(@.type == 'InternalIP')].address}" | tr [:space:] ',')
"${OSCTL}" -n ${nodes} services
"${TALOSCTL}" -n ${nodes} services
${KUBECTL} get nodes -o wide
${KUBECTL} get pods --all-namespaces -o wide
}


@ -16,4 +16,4 @@ if [ "${INTEGRATION_TEST_RUN:-undefined}" != "undefined" ]; then
INTEGRATION_TEST_FLAGS="${INTEGRATION_TEST_FLAGS} -test.run ${INTEGRATION_TEST_RUN}"
fi
"${INTEGRATION_TEST}" -test.v -talos.talosctlpath "${OSCTL}" -talos.provision.mem 2048 -talos.provision.cpu 2 ${INTEGRATION_TEST_FLAGS}
"${INTEGRATION_TEST}" -test.v -talos.talosctlpath "${TALOSCTL}" -talos.provision.mem 2048 -talos.provision.cpu 2 ${INTEGRATION_TEST_FLAGS}


@ -38,7 +38,7 @@ func (cliSuite *CLISuite) DiscoverNodes() []string {
return nil
}
func (cliSuite *CLISuite) buildOsctlCmd(args []string) *exec.Cmd {
func (cliSuite *CLISuite) buildCLICmd(args []string) *exec.Cmd {
// TODO: add support for calling `talosctl config endpoint` before running talosctl
args = append([]string{"--talosconfig", cliSuite.TalosConfig}, args...)
@ -46,14 +46,14 @@ func (cliSuite *CLISuite) buildOsctlCmd(args []string) *exec.Cmd {
return exec.Command(cliSuite.TalosctlPath, args...)
}
// RunOsctl runs talosctl binary with the options provided
func (cliSuite *CLISuite) RunOsctl(args []string, options ...RunOption) {
Run(&cliSuite.Suite, cliSuite.buildOsctlCmd(args), options...)
// RunCLI runs talosctl binary with the options provided
func (cliSuite *CLISuite) RunCLI(args []string, options ...RunOption) {
Run(&cliSuite.Suite, cliSuite.buildCLICmd(args), options...)
}
func (cliSuite *CLISuite) RunAndWaitForMatch(args []string, regex *regexp.Regexp, duration time.Duration, options ...retry.Option) {
cliSuite.Assert().NoError(retry.Constant(duration, options...).Retry(func() error {
stdout, _, err := RunAndWait(&cliSuite.Suite, cliSuite.buildOsctlCmd(args))
stdout, _, err := RunAndWait(&cliSuite.Suite, cliSuite.buildCLICmd(args))
if err != nil {
return retry.UnexpectedError(err)
}


@ -22,8 +22,8 @@ func (suite *CompletionSuite) SuiteName() string {
// TestSuccess runs comand with success.
func (suite *CompletionSuite) TestSuccess() {
suite.RunOsctl([]string{"completion", "bash"})
suite.RunOsctl([]string{"completion", "zsh"})
suite.RunCLI([]string{"completion", "bash"})
suite.RunCLI([]string{"completion", "zsh"})
}
func init() {


@ -24,23 +24,23 @@ func (suite *ContainersSuite) SuiteName() string {
// TestContainerd inspects containers via containerd driver.
func (suite *ContainersSuite) TestContainerd() {
suite.RunOsctl([]string{"containers"},
suite.RunCLI([]string{"containers"},
base.StdoutShouldMatch(regexp.MustCompile(`IMAGE`)),
base.StdoutShouldMatch(regexp.MustCompile(`talos/osd`)),
)
suite.RunOsctl([]string{"containers", "-k"},
suite.RunCLI([]string{"containers", "-k"},
base.StdoutShouldMatch(regexp.MustCompile(`kubelet`)),
)
}
// TestCRI inspects containers via CRI driver.
func (suite *ContainersSuite) TestCRI() {
suite.RunOsctl([]string{"containers", "-c"},
suite.RunCLI([]string{"containers", "-c"},
base.ShouldFail(),
base.StdoutEmpty(),
base.StderrNotEmpty(),
base.StderrShouldMatch(regexp.MustCompile(`CRI inspector is supported only for K8s namespace`)))
suite.RunOsctl([]string{"containers", "-ck"},
suite.RunCLI([]string{"containers", "-ck"},
base.StdoutShouldMatch(regexp.MustCompile(`kube-system/kube-apiserver`)),
)
}


@ -32,7 +32,7 @@ func (suite *CopySuite) TestSuccess() {
defer os.RemoveAll(tempDir) //nolint: errcheck
suite.RunOsctl([]string{"copy", "/etc/os-release", tempDir},
suite.RunCLI([]string{"copy", "/etc/os-release", tempDir},
base.StdoutEmpty())
_, err = os.Stat(filepath.Join(tempDir, "os-release"))
@ -41,7 +41,7 @@ func (suite *CopySuite) TestSuccess() {
// TestMultiNodeFail verifies that command fails with multiple nodes.
func (suite *CopySuite) TestMultiNodeFail() {
suite.RunOsctl([]string{"copy", "--nodes", "127.0.0.1", "--nodes", "127.0.0.1", "/etc/os-release", "."},
suite.RunCLI([]string{"copy", "--nodes", "127.0.0.1", "--nodes", "127.0.0.1", "/etc/os-release", "."},
base.ShouldFail(),
base.StderrNotEmpty(),
base.StdoutEmpty(),


@ -41,7 +41,7 @@ func (suite *CrashdumpSuite) TestRun() {
}
}
suite.RunOsctl(append([]string{"crashdump"}, args...),
suite.RunCLI(append([]string{"crashdump"}, args...),
base.StdoutShouldMatch(regexp.MustCompile(`> containerd`)),
)
}


@ -26,7 +26,7 @@ func (suite *DmesgSuite) SuiteName() string {
// TestHasOutput verifies that dmesg is displayed.
func (suite *DmesgSuite) TestHasOutput() {
suite.RunOsctl([]string{"dmesg"}) // default checks for stdout not empty
suite.RunCLI([]string{"dmesg"}) // default checks for stdout not empty
}
// TestClusterHasOutput verifies that each node in the cluster has some output
@ -42,7 +42,7 @@ func (suite *DmesgSuite) TestClusterHasOutput() {
regexp.MustCompile(fmt.Sprintf(`(?m)^%s:`, regexp.QuoteMeta(node)))))
}
suite.RunOsctl([]string{"--nodes", strings.Join(nodes, ","), "dmesg"},
suite.RunCLI([]string{"--nodes", strings.Join(nodes, ","), "dmesg"},
matchers...)
}


@ -44,7 +44,7 @@ func (suite *GenSuite) TearDownTest() {
// TestCA ...
func (suite *GenSuite) TestCA() {
suite.RunOsctl([]string{"gen", "ca", "--organization", "Foo"},
suite.RunCLI([]string{"gen", "ca", "--organization", "Foo"},
base.StdoutEmpty())
suite.Assert().FileExists("Foo.crt")
@ -54,7 +54,7 @@ func (suite *GenSuite) TestCA() {
// TestKey ...
func (suite *GenSuite) TestKey() {
suite.RunOsctl([]string{"gen", "key", "--name", "Foo"},
suite.RunCLI([]string{"gen", "key", "--name", "Foo"},
base.StdoutEmpty())
suite.Assert().FileExists("Foo.key")
@ -62,10 +62,10 @@ func (suite *GenSuite) TestKey() {
// TestCSR ...
func (suite *GenSuite) TestCSR() {
suite.RunOsctl([]string{"gen", "key", "--name", "Foo"},
suite.RunCLI([]string{"gen", "key", "--name", "Foo"},
base.StdoutEmpty())
suite.RunOsctl([]string{"gen", "csr", "--key", "Foo.key", "--ip", "10.0.0.1"},
suite.RunCLI([]string{"gen", "csr", "--key", "Foo.key", "--ip", "10.0.0.1"},
base.StdoutEmpty())
suite.Assert().FileExists("Foo.csr")
@ -73,16 +73,16 @@ func (suite *GenSuite) TestCSR() {
// TestCrt ...
func (suite *GenSuite) TestCrt() {
suite.RunOsctl([]string{"gen", "ca", "--organization", "Foo"},
suite.RunCLI([]string{"gen", "ca", "--organization", "Foo"},
base.StdoutEmpty())
suite.RunOsctl([]string{"gen", "key", "--name", "Bar"},
suite.RunCLI([]string{"gen", "key", "--name", "Bar"},
base.StdoutEmpty())
suite.RunOsctl([]string{"gen", "csr", "--key", "Bar.key", "--ip", "10.0.0.1"},
suite.RunCLI([]string{"gen", "csr", "--key", "Bar.key", "--ip", "10.0.0.1"},
base.StdoutEmpty())
suite.RunOsctl([]string{"gen", "crt", "--ca", "Foo", "--csr", "Bar.csr", "--name", "foobar"},
suite.RunCLI([]string{"gen", "crt", "--ca", "Foo", "--csr", "Bar.csr", "--name", "foobar"},
base.StdoutEmpty())
suite.Assert().FileExists("foobar.crt")
@ -90,7 +90,7 @@ func (suite *GenSuite) TestCrt() {
// TestKeypair ...
func (suite *GenSuite) TestKeypair() {
suite.RunOsctl([]string{"gen", "keypair", "--organization", "Foo", "--ip", "10.0.0.1"},
suite.RunCLI([]string{"gen", "keypair", "--organization", "Foo", "--ip", "10.0.0.1"},
base.StdoutEmpty())
suite.Assert().FileExists("Foo.crt")


@ -8,6 +8,7 @@ package cli
import (
"regexp"
"sort"
"strings"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime"
@ -31,14 +32,46 @@ func (suite *HealthSuite) TestRun() {
}
args := []string{}
bootstrapAPIIsUsed := true
for _, node := range suite.Cluster.Info().Nodes {
switch node.Type {
case runtime.MachineTypeInit:
args = append(args, "--init-node", node.PrivateIP.String())
case runtime.MachineTypeControlPlane:
args = append(args, "--control-plane-nodes", node.PrivateIP.String())
case runtime.MachineTypeJoin:
args = append(args, "--worker-nodes", node.PrivateIP.String())
if node.Type == runtime.MachineTypeInit {
bootstrapAPIIsUsed = false
}
}
if bootstrapAPIIsUsed {
nodes := []string{}
for _, node := range suite.Cluster.Info().Nodes {
switch node.Type {
case runtime.MachineTypeControlPlane:
nodes = append(nodes, node.PrivateIP.String())
case runtime.MachineTypeJoin:
args = append(args, "--worker-nodes", node.PrivateIP.String())
}
}
sort.Strings(nodes)
if len(nodes) > 0 {
args = append(args, "--init-node", nodes[0])
}
if len(nodes) > 1 {
args = append(args, "--control-plane-nodes", strings.Join(nodes[1:], ","))
}
} else {
for _, node := range suite.Cluster.Info().Nodes {
switch node.Type {
case runtime.MachineTypeInit:
args = append(args, "--init-node", node.PrivateIP.String())
case runtime.MachineTypeControlPlane:
args = append(args, "--control-plane-nodes", node.PrivateIP.String())
case runtime.MachineTypeJoin:
args = append(args, "--worker-nodes", node.PrivateIP.String())
}
}
}
@ -46,7 +79,7 @@ func (suite *HealthSuite) TestRun() {
args = append(args, "--k8s-endpoint", strings.Split(suite.K8sEndpoint, ":")[0])
}
suite.RunOsctl(append([]string{"health"}, args...),
suite.RunCLI(append([]string{"health"}, args...),
base.StderrNotEmpty(),
base.StdoutEmpty(),
base.StderrShouldMatch(regexp.MustCompile(`waiting for all k8s nodes to report ready`)),


@ -24,7 +24,7 @@ func (suite *InterfacesSuite) SuiteName() string {
// TestSuccess verifies successful execution.
func (suite *InterfacesSuite) TestSuccess() {
suite.RunOsctl([]string{"interfaces"},
suite.RunCLI([]string{"interfaces"},
base.StdoutShouldMatch(regexp.MustCompile(`lo`)))
}


@ -32,7 +32,7 @@ func (suite *KubeconfigSuite) TestDirectory() {
defer os.RemoveAll(tempDir) //nolint: errcheck
suite.RunOsctl([]string{"kubeconfig", tempDir},
suite.RunCLI([]string{"kubeconfig", tempDir},
base.StdoutEmpty())
suite.Require().FileExists(filepath.Join(tempDir, "kubeconfig"))
@ -54,7 +54,7 @@ func (suite *KubeconfigSuite) TestCwd() {
suite.Require().NoError(os.Chdir(tempDir))
suite.RunOsctl([]string{"kubeconfig"},
suite.RunCLI([]string{"kubeconfig"},
base.StdoutEmpty())
suite.Require().FileExists(filepath.Join(tempDir, "kubeconfig"))
@ -62,7 +62,7 @@ func (suite *KubeconfigSuite) TestCwd() {
// TestMultiNodeFail verifies that command fails with multiple nodes.
func (suite *KubeconfigSuite) TestMultiNodeFail() {
suite.RunOsctl([]string{"kubeconfig", "--nodes", "127.0.0.1", "--nodes", "127.0.0.1", "."},
suite.RunCLI([]string{"kubeconfig", "--nodes", "127.0.0.1", "--nodes", "127.0.0.1", "."},
base.ShouldFail(),
base.StderrNotEmpty(),
base.StdoutEmpty(),


@ -24,7 +24,7 @@ func (suite *ListSuite) SuiteName() string {
// TestSuccess runs comand with success.
func (suite *ListSuite) TestSuccess() {
suite.RunOsctl([]string{"list", "/etc"},
suite.RunCLI([]string{"list", "/etc"},
base.StdoutShouldMatch(regexp.MustCompile(`os-release`)))
}


@ -26,17 +26,17 @@ func (suite *LogsSuite) SuiteName() string {
// TestServiceLogs verifies that logs are displayed.
func (suite *LogsSuite) TestServiceLogs() {
suite.RunOsctl([]string{"logs", "kubelet"}) // default checks for stdout not empty
suite.RunCLI([]string{"logs", "kubelet"}) // default checks for stdout not empty
}
// TestTailLogs verifies that logs can be displayed with tail lines.
func (suite *LogsSuite) TestTailLogs() {
// run some machined API calls to produce enough log lines
for i := 0; i < 10; i++ {
suite.RunOsctl([]string{"version"})
suite.RunCLI([]string{"version"})
}
suite.RunOsctl([]string{"logs", "apid", "--tail", "5"},
suite.RunCLI([]string{"logs", "apid", "--tail", "5"},
base.StdoutMatchFunc(func(stdout string) error {
lines := strings.Count(stdout, "\n")
if lines != 5 {
@ -49,7 +49,7 @@ func (suite *LogsSuite) TestTailLogs() {
// TestServiceNotFound verifies that logs displays an error if service is not found.
func (suite *LogsSuite) TestServiceNotFound() {
suite.RunOsctl([]string{"logs", "servicenotfound"},
suite.RunCLI([]string{"logs", "servicenotfound"},
base.ShouldFail(),
base.StdoutEmpty(),
base.StderrNotEmpty(),


@ -24,13 +24,13 @@ func (suite *MemorySuite) SuiteName() string {
// TestSuccess verifies successful execution.
func (suite *MemorySuite) TestSuccess() {
suite.RunOsctl([]string{"memory"},
suite.RunCLI([]string{"memory"},
base.StdoutShouldMatch(regexp.MustCompile(`FREE`)))
}
// TestVerbose verifies verbose mode.
func (suite *MemorySuite) TestVerbose() {
suite.RunOsctl([]string{"memory", "-v"},
suite.RunCLI([]string{"memory", "-v"},
base.StdoutShouldMatch(regexp.MustCompile(`MemFree: \d+ kB`)))
}


@ -24,7 +24,7 @@ func (suite *MountsSuite) SuiteName() string {
// TestSuccess verifies successful execution.
func (suite *MountsSuite) TestSuccess() {
suite.RunOsctl([]string{"mounts"},
suite.RunCLI([]string{"mounts"},
base.StdoutShouldMatch(regexp.MustCompile(`FILESYSTEM`)))
}


@ -24,7 +24,7 @@ func (suite *ProcessesSuite) SuiteName() string {
// TestSuccess verifies successful execution.
func (suite *ProcessesSuite) TestSuccess() {
suite.RunOsctl([]string{"processes"},
suite.RunCLI([]string{"processes"},
base.StdoutShouldMatch(regexp.MustCompile(`PID`)))
}


@ -24,13 +24,13 @@ func (suite *ReadSuite) SuiteName() string {
// TestSuccess runs comand with success.
func (suite *ReadSuite) TestSuccess() {
suite.RunOsctl([]string{"read", "/etc/os-release"},
suite.RunCLI([]string{"read", "/etc/os-release"},
base.StdoutShouldMatch(regexp.MustCompile(`ID=talos`)))
}
// TestMultiNodeFail verifies that command fails with multiple nodes.
func (suite *ReadSuite) TestMultiNodeFail() {
suite.RunOsctl([]string{"read", "--nodes", "127.0.0.1", "--nodes", "127.0.0.1", "/etc/os-release"},
suite.RunCLI([]string{"read", "--nodes", "127.0.0.1", "--nodes", "127.0.0.1", "/etc/os-release"},
base.ShouldFail(),
base.StderrNotEmpty(),
base.StdoutEmpty(),


@ -30,7 +30,7 @@ func (suite *RestartSuite) TestSystem() {
suite.T().Skip("skipping in short mode")
}
suite.RunOsctl([]string{"restart", "trustd"},
suite.RunCLI([]string{"restart", "trustd"},
base.StdoutEmpty())
time.Sleep(50 * time.Millisecond)
@ -44,7 +44,7 @@ func (suite *RestartSuite) TestK8s() {
suite.T().Skip("skipping in short mode")
}
suite.RunOsctl([]string{"restart", "-k", "kubelet"},
suite.RunCLI([]string{"restart", "-k", "kubelet"},
base.StdoutEmpty())
time.Sleep(50 * time.Millisecond)


@ -24,7 +24,7 @@ func (suite *RoutesSuite) SuiteName() string {
// TestSuccess verifies successful execution.
func (suite *RoutesSuite) TestSuccess() {
suite.RunOsctl([]string{"routes"},
suite.RunCLI([]string{"routes"},
base.StdoutShouldMatch(regexp.MustCompile(`GATEWAY`)),
base.StdoutShouldMatch(regexp.MustCompile(`127\.0\.0\.0/8`)),
)


@ -24,7 +24,7 @@ func (suite *ServicesSuite) SuiteName() string {
// TestList verifies service list.
func (suite *ServicesSuite) TestList() {
suite.RunOsctl([]string{"services"},
suite.RunCLI([]string{"services"},
base.StdoutShouldMatch(regexp.MustCompile(`STATE`)),
base.StdoutShouldMatch(regexp.MustCompile(`osd`)),
base.StdoutShouldMatch(regexp.MustCompile(`apid`)),
@ -33,13 +33,13 @@ func (suite *ServicesSuite) TestList() {
// TestStatus verifies service status.
func (suite *ServicesSuite) TestStatus() {
suite.RunOsctl([]string{"service", "apid"},
suite.RunCLI([]string{"service", "apid"},
base.StdoutShouldMatch(regexp.MustCompile(`STATE`)),
base.StdoutShouldMatch(regexp.MustCompile(`apid`)),
base.StdoutShouldMatch(regexp.MustCompile(`\[Running\]`)),
)
suite.RunOsctl([]string{"service", "osd", "status"},
suite.RunCLI([]string{"service", "osd", "status"},
base.StdoutShouldMatch(regexp.MustCompile(`STATE`)),
base.StdoutShouldMatch(regexp.MustCompile(`osd`)),
base.StdoutShouldMatch(regexp.MustCompile(`\[Running\]`)),


@ -24,11 +24,11 @@ func (suite *StatsSuite) SuiteName() string {
// TestContainerd inspects stats via containerd driver.
func (suite *StatsSuite) TestContainerd() {
suite.RunOsctl([]string{"stats"},
suite.RunCLI([]string{"stats"},
base.StdoutShouldMatch(regexp.MustCompile(`CPU`)),
base.StdoutShouldMatch(regexp.MustCompile(`osd`)),
)
suite.RunOsctl([]string{"stats", "-k"},
suite.RunCLI([]string{"stats", "-k"},
base.StdoutShouldMatch(regexp.MustCompile(`CPU`)),
base.StdoutShouldMatch(regexp.MustCompile(`kubelet`)),
base.StdoutShouldMatch(regexp.MustCompile(`k8s.io`)),
@ -37,12 +37,12 @@ func (suite *StatsSuite) TestContainerd() {
// TestCRI inspects stats via CRI driver.
func (suite *StatsSuite) TestCRI() {
suite.RunOsctl([]string{"stats", "-c"},
suite.RunCLI([]string{"stats", "-c"},
base.ShouldFail(),
base.StdoutEmpty(),
base.StderrNotEmpty(),
base.StderrShouldMatch(regexp.MustCompile(`CRI inspector is supported only for K8s namespace`)))
suite.RunOsctl([]string{"stats", "-ck"},
suite.RunCLI([]string{"stats", "-ck"},
base.StdoutShouldMatch(regexp.MustCompile(`CPU`)),
base.StdoutShouldMatch(regexp.MustCompile(`kube-system/kube-apiserver`)),
base.StdoutShouldMatch(regexp.MustCompile(`k8s.io`)),


@ -44,11 +44,11 @@ func (suite *ValidateSuite) TearDownTest() {
// TestValidate generates config and validates it for all the modes.
func (suite *ValidateSuite) TestValidate() {
suite.RunOsctl([]string{"gen", "config", "foobar", "https://10.0.0.1"})
suite.RunCLI([]string{"gen", "config", "foobar", "https://10.0.0.1"})
for _, configFile := range []string{"init.yaml", "controlplane.yaml", "join.yaml"} {
for _, mode := range []string{"cloud", "container", "metal"} {
suite.RunOsctl([]string{"validate", "-m", mode, "-c", configFile})
suite.RunCLI([]string{"validate", "-m", mode, "-c", configFile})
}
}
}


@ -24,7 +24,7 @@ func (suite *VersionSuite) SuiteName() string {
// TestExpectedVersionMaster verifies master node version matches expected
func (suite *VersionSuite) TestExpectedVersionMaster() {
suite.RunOsctl([]string{"version"},
suite.RunCLI([]string{"version"},
base.StdoutShouldMatch(regexp.MustCompile(`Client:\n\s*Tag:\s*`+regexp.QuoteMeta(suite.Version))),
base.StdoutShouldMatch(regexp.MustCompile(`Server:\n(\s*NODE:[^\n]+\n)?\s*Tag:\s*`+regexp.QuoteMeta(suite.Version))),
)
@ -32,14 +32,14 @@ func (suite *VersionSuite) TestExpectedVersionMaster() {
// TestShortVersion verifies short version output.
func (suite *VersionSuite) TestShortVersion() {
suite.RunOsctl([]string{"version", "--short"},
suite.RunCLI([]string{"version", "--short"},
base.StdoutShouldMatch(regexp.MustCompile(`Client\s*`+regexp.QuoteMeta(suite.Version))),
)
}
// TestClientVersion verifies only client version output.
func (suite *VersionSuite) TestClient() {
suite.RunOsctl([]string{"version", "--client"},
suite.RunCLI([]string{"version", "--client"},
base.StdoutShouldMatch(regexp.MustCompile(`Client:\n\s*Tag:\s*`+regexp.QuoteMeta(suite.Version))),
base.StdoutShouldNotMatch(regexp.MustCompile(`Server`)),
)


@ -6,6 +6,7 @@ package check
import (
"context"
"errors"
"time"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime"
@ -24,7 +25,16 @@ func DefaultClusterChecks() []ClusterCheck {
// wait for bootkube to finish on init node
func(cluster ClusterInfo) conditions.Condition {
return conditions.PollingCondition("bootkube to finish", func(ctx context.Context) error {
return ServiceStateAssertion(ctx, cluster, "bootkube", "Finished", "Skipped")
err := ServiceStateAssertion(ctx, cluster, "bootkube", "Finished", "Skipped")
if err != nil {
if errors.Is(err, ErrServiceNotFound) {
return nil
}
return err
}
return nil
}, 5*time.Minute, 5*time.Second)
},
// wait for apid to be ready on all the nodes


@ -8,11 +8,15 @@ package check
import (
"context"
"fmt"
"sort"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime"
"github.com/talos-systems/talos/pkg/client"
)
// ErrServiceNotFound is an error that indicates that a service was not found.
var ErrServiceNotFound = fmt.Errorf("service not found")
// ServiceStateAssertion checks whether service reached some specified state.
//
//nolint: gocyclo
@ -22,14 +26,27 @@ func ServiceStateAssertion(ctx context.Context, cluster ClusterInfo, service str
return err
}
// perform check against "init" node
initNodes := cluster.NodesByType(runtime.MachineTypeInit)
var node string
if len(initNodes) != 1 {
return fmt.Errorf("init node not found, len(initNodes) = %d", len(initNodes))
switch {
case len(cluster.NodesByType(runtime.MachineTypeInit)) > 0:
nodes := cluster.NodesByType(runtime.MachineTypeInit)
if len(nodes) != 1 {
return fmt.Errorf("expected 1 init node, got %d", len(nodes))
}
node = nodes[0]
case len(cluster.NodesByType(runtime.MachineTypeControlPlane)) > 0:
nodes := cluster.NodesByType(runtime.MachineTypeControlPlane)
sort.Strings(nodes)
node = nodes[0]
default:
return fmt.Errorf("no bootstrap node found")
}
nodeCtx := client.WithNodes(ctx, initNodes[0])
nodeCtx := client.WithNodes(ctx, node)
servicesInfo, err := cli.ServiceInfo(nodeCtx, service)
if err != nil {
@ -57,7 +74,7 @@ func ServiceStateAssertion(ctx context.Context, cluster ClusterInfo, service str
}
if !serviceOk {
return fmt.Errorf("service %q not found", service)
return ErrServiceNotFound
}
return nil


@ -109,9 +109,8 @@ func (p *provisioner) createNode(ctx context.Context, clusterReq provision.Clust
// Mutate the container configurations based on the node type.
switch nodeReq.Config.Machine().Type() {
case runtime.MachineTypeInit:
portsToOpen := []string{"50000:50000/tcp", "6443:6443/tcp"}
if nodeReq.Config.Machine().Type() == runtime.MachineTypeInit || nodeReq.Config.Machine().Type() == runtime.MachineTypeControlPlane {
portsToOpen := nodeReq.Ports
if len(options.DockerPorts) > 0 {
portsToOpen = append(portsToOpen, options.DockerPorts...)
@ -128,8 +127,6 @@ func (p *provisioner) createNode(ctx context.Context, clusterReq provision.Clust
hostConfig.PortBindings = generatedPortMap.portBindings
fallthrough
case runtime.MachineTypeControlPlane:
containerConfig.Volumes[constants.EtcdDataPath] = struct{}{}
if nodeReq.IP == nil {


@ -116,4 +116,6 @@ type NodeRequest struct {
Memory int64
// Disk (volume) size in bytes, if applicable
DiskSize int64
// Ports
Ports []string
}


@ -5,59 +5,16 @@
package generate
import (
"net/url"
v1alpha1 "github.com/talos-systems/talos/pkg/config/types/v1alpha1"
)
func controlPlaneUd(in *Input) (*v1alpha1.Config, error) {
config := &v1alpha1.Config{
ConfigVersion: "v1alpha1",
ConfigDebug: in.Debug,
ConfigPersist: in.Persist,
}
machine := &v1alpha1.MachineConfig{
MachineType: "controlplane",
MachineToken: in.TrustdInfo.Token,
MachineCA: in.Certs.OS,
MachineCertSANs: in.AdditionalMachineCertSANs,
MachineKubelet: &v1alpha1.KubeletConfig{},
MachineNetwork: in.NetworkConfig,
MachineInstall: &v1alpha1.InstallConfig{
InstallDisk: in.InstallDisk,
InstallImage: in.InstallImage,
InstallBootloader: true,
},
MachineRegistries: v1alpha1.RegistriesConfig{
RegistryMirrors: in.RegistryMirrors,
},
}
controlPlaneURL, err := url.Parse(in.ControlPlaneEndpoint)
config, err := initUd(in)
if err != nil {
return config, err
return nil, err
}
cluster := &v1alpha1.ClusterConfig{
BootstrapToken: in.Secrets.BootstrapToken,
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{URL: controlPlaneURL},
},
EtcdConfig: &v1alpha1.EtcdConfig{
RootCA: in.Certs.Etcd,
},
ClusterNetwork: &v1alpha1.ClusterNetworkConfig{
DNSDomain: in.ServiceDomain,
PodSubnet: in.PodNet,
ServiceSubnet: in.ServiceNet,
},
ClusterCA: in.Certs.K8s,
ClusterAESCBCEncryptionSecret: in.Secrets.AESCBCEncryptionSecret,
}
config.MachineConfig = machine
config.ClusterConfig = cluster
config.MachineConfig.MachineType = "controlplane"
return config, nil
}