test: add a test to deploy and destroy workload cluster

The workload cluster has a single control plane node and no workers, just enough to exercise
deploying and destroying a second cluster while the management cluster is in place.

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
Andrey Smirnov 2020-11-26 18:18:26 +03:00 committed by talos-bot
parent 72d73d42a7
commit ad5f1ed9a9
7 changed files with 224 additions and 118 deletions


@@ -21,26 +21,26 @@ spec:
       jsonPath: .status.ready
       name: Ready
       type: string
-    - description: Metal Machine
-      jsonPath: .spec.metalMachineRef.name
-      name: MetalMachine
-      priority: 1
-      type: string
     - description: Server ID
       jsonPath: .metadata.name
       name: Server
       priority: 1
       type: string
-    - description: Cluster to which this ServerBinding belongs
-      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
-      name: Cluster
-      priority: 1
-      type: string
     - description: Server Class
       jsonPath: .spec.serverClassRef.name
       name: ServerClass
       priority: 1
       type: string
+    - description: Metal Machine
+      jsonPath: .spec.metalMachineRef.name
+      name: MetalMachine
+      priority: 1
+      type: string
+    - description: Cluster to which this ServerBinding belongs
+      jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name
+      name: Cluster
+      priority: 1
+      type: string
     name: v1alpha3
     schema:
      openAPIV3Schema:
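For reference, these printer columns are presumably generated by controller-gen from kubebuilder printcolumn markers on the Go type backing this CRD (ServerBinding, per the column description). A minimal sketch of what such markers look like; the package layout and type body are assumptions, only the column names, JSON paths, and priorities come from the YAML above (the Cluster column is omitted here because its JSONPath needs escaped dots):

// Sketch only: not taken from this repository.
package v1alpha3

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:name="Server",type="string",JSONPath=".metadata.name",priority=1,description="Server ID"
// +kubebuilder:printcolumn:name="ServerClass",type="string",JSONPath=".spec.serverClassRef.name",priority=1,description="Server Class"
// +kubebuilder:printcolumn:name="MetalMachine",type="string",JSONPath=".spec.metalMachineRef.name",priority=1,description="Metal Machine"

// ServerBinding is the resource the columns above are attached to (name assumed
// from the column description in the generated CRD).
type ServerBinding struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
}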


@@ -14,6 +14,7 @@ import (
     cabpt "github.com/talos-systems/cluster-api-bootstrap-provider-talos/api/v1alpha3"
     cacpt "github.com/talos-systems/cluster-api-control-plane-provider-talos/api/v1alpha3"
+    "github.com/talos-systems/go-retry/retry"
     taloscluster "github.com/talos-systems/talos/pkg/cluster"
     talosclusterapi "github.com/talos-systems/talos/pkg/machinery/api/cluster"
     talosclient "github.com/talos-systems/talos/pkg/machinery/client"
@@ -104,6 +105,10 @@ func NewCluster(ctx context.Context, metalClient runtimeclient.Reader, clusterNa
             continue
         }

+        if !metalMachine.DeletionTimestamp.IsZero() {
+            continue
+        }
+
         var server metal.Server

         if err := metalClient.Get(ctx, types.NamespacedName{Namespace: metalMachine.Spec.ServerRef.Namespace, Name: metalMachine.Spec.ServerRef.Name}, &server); err != nil {
@@ -177,6 +182,13 @@ func NewCluster(ctx context.Context, metalClient runtimeclient.Reader, clusterNa

 // Health runs the healthcheck for the cluster.
 func (cluster *Cluster) Health(ctx context.Context) error {
+    return retry.Constant(5*time.Minute, retry.WithUnits(10*time.Second)).Retry(func() error {
+        // retry health checks as sometimes bootstrap bootkube issues break the check
+        return retry.ExpectedError(cluster.health(ctx))
+    })
+}
+
+func (cluster *Cluster) health(ctx context.Context) error {
     resp, err := cluster.client.ClusterHealthCheck(talosclient.WithNodes(ctx, cluster.controlPlaneNodes[0]), 3*time.Minute, &talosclusterapi.ClusterInfo{
         ControlPlaneNodes: cluster.controlPlaneNodes,
         WorkerNodes:       cluster.workerNodes,
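The wrapper above relies on the go-retry convention already used elsewhere in sfyra: an error wrapped with retry.ExpectedError is retried until the time budget is exhausted, while retry.UnexpectedError aborts immediately. A self-contained sketch of that pattern, with a hypothetical probe standing in for cluster.health(ctx):

package main

import (
    "errors"
    "fmt"
    "time"

    "github.com/talos-systems/go-retry/retry"
)

func main() {
    attempts := 0

    // Poll every 10 seconds for up to 5 minutes, mirroring Health() above.
    // Returning retry.ExpectedError keeps retrying; retry.UnexpectedError
    // (not used here) would abort the loop at once.
    err := retry.Constant(5*time.Minute, retry.WithUnits(10*time.Second)).Retry(func() error {
        attempts++

        // hypothetical probe standing in for cluster.health(ctx)
        if attempts < 3 {
            return retry.ExpectedError(errors.New("cluster not healthy yet"))
        }

        return nil
    })

    fmt.Printf("healthy after %d attempts, err = %v\n", attempts, err)
}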


@@ -0,0 +1,157 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package tests

import (
    "context"
    "fmt"
    "os"
    "strconv"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "github.com/talos-systems/go-retry/retry"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/discovery/cached/memory"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/restmapper"
    "sigs.k8s.io/cluster-api/api/v1alpha3"
    capiclient "sigs.k8s.io/cluster-api/cmd/clusterctl/client"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/talos-systems/sidero/sfyra/pkg/capi"
    "github.com/talos-systems/sidero/sfyra/pkg/loadbalancer"
    "github.com/talos-systems/sidero/sfyra/pkg/talos"
    "github.com/talos-systems/sidero/sfyra/pkg/vm"
)

func deployCluster(ctx context.Context, t *testing.T, metalClient client.Client, capiCluster talos.Cluster, vmSet *vm.Set,
    capiManager *capi.Manager, clusterName, serverClassName string, loadbalancerPort int, controlPlaneNodes, workerNodes int64) (*loadbalancer.ControlPlane, *capi.Cluster) {
    t.Logf("deploying cluster %q from server class %q with loadbalancer port %d", clusterName, serverClassName, loadbalancerPort)

    kubeconfig, err := capiManager.GetKubeconfig(ctx)
    require.NoError(t, err)

    config, err := capiCluster.KubernetesClient().K8sRestConfig(ctx)
    require.NoError(t, err)

    capiClient := capiManager.GetManagerClient()

    loadbalancer, err := loadbalancer.NewControlPlane(metalClient, vmSet.BridgeIP(), loadbalancerPort, "default", clusterName, false)
    require.NoError(t, err)

    os.Setenv("CONTROL_PLANE_ENDPOINT", vmSet.BridgeIP().String())
    os.Setenv("CONTROL_PLANE_PORT", strconv.Itoa(loadbalancerPort))
    os.Setenv("CONTROL_PLANE_SERVERCLASS", serverClassName)
    os.Setenv("WORKER_SERVERCLASS", serverClassName)
    // TODO: make it configurable
    os.Setenv("KUBERNETES_VERSION", "v1.19.4")

    templateOptions := capiclient.GetClusterTemplateOptions{
        Kubeconfig:               kubeconfig,
        ClusterName:              clusterName,
        ControlPlaneMachineCount: &controlPlaneNodes,
        WorkerMachineCount:       &workerNodes,
    }

    template, err := capiClient.GetClusterTemplate(templateOptions)
    require.NoError(t, err)

    dc, err := discovery.NewDiscoveryClientForConfig(config)
    require.NoError(t, err)

    mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc))

    dyn, err := dynamic.NewForConfig(config)
    require.NoError(t, err)

    for _, obj := range template.Objs() {
        var mapping *meta.RESTMapping

        mapping, err = mapper.RESTMapping(obj.GroupVersionKind().GroupKind(), obj.GroupVersionKind().Version)
        require.NoError(t, err)

        var dr dynamic.ResourceInterface

        if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
            // namespaced resources should specify the namespace
            dr = dyn.Resource(mapping.Resource).Namespace(obj.GetNamespace())
        } else {
            // for cluster-wide resources
            dr = dyn.Resource(mapping.Resource)
        }

        var data []byte

        data, err = obj.MarshalJSON()
        require.NoError(t, err)

        t.Logf("applying %s", string(data))

        obj := obj

        _, err = dr.Create(ctx, &obj, metav1.CreateOptions{
            FieldManager: "sfyra",
        })
        if err != nil {
            if apierrors.IsAlreadyExists(err) {
                _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{
                    FieldManager: "sfyra",
                })
            }
        }

        require.NoError(t, err)
    }

    t.Log("waiting for the cluster to be provisioned")

    require.NoError(t, retry.Constant(10*time.Minute, retry.WithUnits(10*time.Second), retry.WithErrorLogging(true)).Retry(func() error {
        return capi.CheckClusterReady(ctx, metalClient, clusterName)
    }))

    t.Log("verifying cluster health")

    deployedCluster, err := capi.NewCluster(ctx, metalClient, clusterName, vmSet.BridgeIP())
    require.NoError(t, err)

    require.NoError(t, deployedCluster.Health(ctx))

    return loadbalancer, deployedCluster
}

func deleteCluster(ctx context.Context, t *testing.T, metalClient client.Client, clusterName string) {
    var cluster v1alpha3.Cluster

    err := metalClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: clusterName}, &cluster)
    require.NoError(t, err)

    t.Logf("deleting cluster %q", clusterName)

    err = metalClient.Delete(ctx, &cluster)
    require.NoError(t, err)

    require.NoError(t, retry.Constant(3*time.Minute, retry.WithUnits(10*time.Second)).Retry(func() error {
        err = metalClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: clusterName}, &cluster)
        if err == nil {
            err = metalClient.Delete(ctx, &cluster)
            if err != nil {
                return retry.UnexpectedError(err)
            }

            return retry.ExpectedError(fmt.Errorf("cluster is not deleted yet"))
        }

        if apierrors.IsNotFound(err) {
            return nil
        }

        return retry.UnexpectedError(err)
    }))
}
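The apply loop in deployCluster boils down to a create-or-apply step against the dynamic client: try Create first, and fall back to a server-side apply Patch when the object already exists, so repeated test runs converge on the same state. A standalone sketch of that idiom; the helper name and packaging are assumptions, not part of this commit:

package tests

import (
    "context"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/dynamic"
)

// createOrApply is a hypothetical helper distilling the loop body above:
// create the object, and if it already exists, server-side apply it instead.
func createOrApply(ctx context.Context, dr dynamic.ResourceInterface, obj *unstructured.Unstructured) error {
    data, err := obj.MarshalJSON()
    if err != nil {
        return err
    }

    _, err = dr.Create(ctx, obj, metav1.CreateOptions{FieldManager: "sfyra"})
    if apierrors.IsAlreadyExists(err) {
        // the object was created by a previous run: apply the new manifest on top
        _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: "sfyra"})
    }

    return err
}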


@@ -6,26 +6,11 @@ package tests
 import (
     "context"
-    "os"
-    "strconv"
     "testing"
-    "time"

-    "github.com/stretchr/testify/require"
-    "github.com/talos-systems/go-retry/retry"
-    apierrors "k8s.io/apimachinery/pkg/api/errors"
-    "k8s.io/apimachinery/pkg/api/meta"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/client-go/discovery"
-    "k8s.io/client-go/discovery/cached/memory"
-    "k8s.io/client-go/dynamic"
-    "k8s.io/client-go/restmapper"
-    capiclient "sigs.k8s.io/cluster-api/cmd/clusterctl/client"
     "sigs.k8s.io/controller-runtime/pkg/client"

     "github.com/talos-systems/sidero/sfyra/pkg/capi"
-    "github.com/talos-systems/sidero/sfyra/pkg/loadbalancer"
     "github.com/talos-systems/sidero/sfyra/pkg/talos"
     "github.com/talos-systems/sidero/sfyra/pkg/vm"
 )
@@ -36,97 +21,8 @@
 )

 // TestManagementCluster deploys the management cluster via CAPI.
-//
-//nolint: gocognit
 func TestManagementCluster(ctx context.Context, metalClient client.Client, cluster talos.Cluster, vmSet *vm.Set, capiManager *capi.Manager) TestFunc {
     return func(t *testing.T) {
-        kubeconfig, err := capiManager.GetKubeconfig(ctx)
-        require.NoError(t, err)
-
-        config, err := cluster.KubernetesClient().K8sRestConfig(ctx)
-        require.NoError(t, err)
-
-        capiClient := capiManager.GetManagerClient()
-
-        nodeCount := int64(1)
-
-        _, err = loadbalancer.NewControlPlane(metalClient, vmSet.BridgeIP(), managementClusterLBPort, "default", managementClusterName, false)
-        require.NoError(t, err)
-
-        os.Setenv("CONTROL_PLANE_ENDPOINT", vmSet.BridgeIP().String())
-        os.Setenv("CONTROL_PLANE_PORT", strconv.Itoa(managementClusterLBPort))
-        os.Setenv("CONTROL_PLANE_SERVERCLASS", serverClassName)
-        os.Setenv("WORKER_SERVERCLASS", serverClassName)
-        // TODO: make it configurable
-        os.Setenv("KUBERNETES_VERSION", "v1.19.0")
-
-        templateOptions := capiclient.GetClusterTemplateOptions{
-            Kubeconfig:               kubeconfig,
-            ClusterName:              managementClusterName,
-            ControlPlaneMachineCount: &nodeCount,
-            WorkerMachineCount:       &nodeCount,
-        }
-
-        template, err := capiClient.GetClusterTemplate(templateOptions)
-        require.NoError(t, err)
-
-        dc, err := discovery.NewDiscoveryClientForConfig(config)
-        require.NoError(t, err)
-
-        mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc))
-
-        dyn, err := dynamic.NewForConfig(config)
-        require.NoError(t, err)
-
-        for _, obj := range template.Objs() {
-            var mapping *meta.RESTMapping
-
-            mapping, err = mapper.RESTMapping(obj.GroupVersionKind().GroupKind(), obj.GroupVersionKind().Version)
-            require.NoError(t, err)
-
-            var dr dynamic.ResourceInterface
-
-            if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
-                // namespaced resources should specify the namespace
-                dr = dyn.Resource(mapping.Resource).Namespace(obj.GetNamespace())
-            } else {
-                // for cluster-wide resources
-                dr = dyn.Resource(mapping.Resource)
-            }
-
-            var data []byte
-
-            data, err = obj.MarshalJSON()
-            require.NoError(t, err)
-
-            t.Logf("applying %s", string(data))
-
-            obj := obj
-
-            _, err = dr.Create(ctx, &obj, metav1.CreateOptions{
-                FieldManager: "sfyra",
-            })
-            if err != nil {
-                if apierrors.IsAlreadyExists(err) {
-                    _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{
-                        FieldManager: "sfyra",
-                    })
-                }
-            }
-
-            require.NoError(t, err)
-        }
-
-        t.Log("waiting for the cluster to be provisioned")
-
-        require.NoError(t, retry.Constant(10*time.Minute, retry.WithUnits(10*time.Second)).Retry(func() error {
-            return capi.CheckClusterReady(ctx, metalClient, managementClusterName)
-        }))
-
-        t.Log("verifying cluster health")
-
-        cluster, err := capi.NewCluster(ctx, metalClient, managementClusterName, vmSet.BridgeIP())
-        require.NoError(t, err)
-
-        require.NoError(t, cluster.Health(ctx))
+        deployCluster(ctx, t, metalClient, cluster, vmSet, capiManager, managementClusterName, defaultServerClassName, managementClusterLBPort, 1, 1)
     }
 }


@@ -38,7 +38,10 @@ import (
     "github.com/talos-systems/sidero/sfyra/pkg/vm"
 )

-const serverClassName = "default"
+const (
+    defaultServerClassName  = "default"
+    workloadServerClassName = "workload"
+)

 // TestServerClassDefault verifies server class creation.
 func TestServerClassDefault(ctx context.Context, metalClient client.Client, vmSet *vm.Set) TestFunc {
@@ -53,14 +56,14 @@ func TestServerClassDefault(ctx context.Context, metalClient client.Client, vmSe
             },
         }

-        serverClass, err := createServerClass(ctx, metalClient, serverClassName, classSpec)
+        serverClass, err := createServerClass(ctx, metalClient, defaultServerClassName, classSpec)
         require.NoError(t, err)

         numNodes := len(vmSet.Nodes())

         // wait for the server class to gather all nodes (all nodes should match)
         require.NoError(t, retry.Constant(2*time.Minute, retry.WithUnits(10*time.Second)).Retry(func() error {
-            if err := metalClient.Get(ctx, types.NamespacedName{Name: serverClassName}, &serverClass); err != nil {
+            if err := metalClient.Get(ctx, types.NamespacedName{Name: defaultServerClassName}, &serverClass); err != nil {
                 return retry.UnexpectedError(err)
             }
@@ -86,6 +89,9 @@ func TestServerClassDefault(ctx context.Context, metalClient client.Client, vmSe
         sort.Strings(actualUUIDs)

         assert.Equal(t, expectedUUIDs, actualUUIDs)
+
+        _, err = createServerClass(ctx, metalClient, workloadServerClassName, classSpec)
+        require.NoError(t, err)
     }
 }


@@ -119,6 +119,10 @@ func Run(ctx context.Context, cluster talos.Cluster, vmSet *vm.Set, capiManager
             "TestServerReset",
             TestServerReset(ctx, metalClient),
         },
+        {
+            "TestWorkloadCluster",
+            TestWorkloadCluster(ctx, metalClient, cluster, vmSet, capiManager),
+        },
     }

     testsToRun := []testing.InternalTest{}


@@ -0,0 +1,31 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package tests

import (
    "context"
    "testing"

    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/talos-systems/sidero/sfyra/pkg/capi"
    "github.com/talos-systems/sidero/sfyra/pkg/talos"
    "github.com/talos-systems/sidero/sfyra/pkg/vm"
)

const (
    workloadClusterName   = "workload-cluster"
    workloadClusterLBPort = 20000
)

// TestWorkloadCluster deploys and destroys the workload cluster via CAPI.
func TestWorkloadCluster(ctx context.Context, metalClient client.Client, cluster talos.Cluster, vmSet *vm.Set, capiManager *capi.Manager) TestFunc {
    return func(t *testing.T) {
        loadbalancer, _ := deployCluster(ctx, t, metalClient, cluster, vmSet, capiManager, workloadClusterName, workloadServerClassName, workloadClusterLBPort, 1, 0)
        defer loadbalancer.Close()

        deleteCluster(ctx, t, metalClient, workloadClusterName)
    }
}