Mirror of https://github.com/siderolabs/talos.git (synced 2025-10-01 18:51:13 +02:00)
chore(ci): add tests with longhorn v2 engine

Add tests with the Longhorn v2 engine.

Fixes: #9669

Signed-off-by: Noel Georgi <git@frezbo.dev>

commit 82c9ec158e (parent 689ea1dbfe)
.github/workflows/ci.yaml (vendored, 14 changes)
@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-22T14:28:18Z by kres 3075de9.
+# Generated on 2025-01-22T17:37:55Z by kres 3075de9.
 
 name: default
 concurrency:
@@ -2957,10 +2957,14 @@ jobs:
 EXTRA_TEST_ARGS: -talos.csi=longhorn
 GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
 IMAGE_REGISTRY: registry.dev.siderolabs.io
-QEMU_MEMORY_WORKERS: "3072"
+QEMU_EXTRA_DISKS: "1"
+QEMU_EXTRA_DISKS_DRIVERS: nvme
+QEMU_EXTRA_DISKS_SIZE: "12288"
+QEMU_MEMORY_WORKERS: "4096"
 QEMU_WORKERS: "3"
 SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
+WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/longhorn-cp.yaml'
+WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
 run: |
   sudo -E make e2e-qemu
 - name: save artifacts
@@ -3067,7 +3071,7 @@ jobs:
 QEMU_MEMORY_WORKERS: "4096"
 QEMU_WORKERS: "3"
 SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH: '@hack/test/patches/openebs-cp.yaml'
+WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/openebs-cp.yaml'
 WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/openebs.yaml'
 run: |
   sudo -E make e2e-qemu
@@ -3175,7 +3179,7 @@ jobs:
 QEMU_MEMORY_WORKERS: "8192"
 QEMU_WORKERS: "3"
 SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'
+WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/rook-ceph.yaml'
 run: |
   sudo -E make e2e-qemu
 - name: save artifacts
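
The new variables above feed the e2e QEMU harness through `sudo -E make e2e-qemu`. As a sketch of a local run mirroring the updated longhorn job (the Makefile target and variable names come straight from the diff; exporting them directly is an assumption):

    export QEMU_WORKERS=3
    export QEMU_MEMORY_WORKERS=4096   # bumped from 3072 for the v2 engine
    export QEMU_EXTRA_DISKS=1         # one extra disk per worker...
    export QEMU_EXTRA_DISKS_DRIVERS=nvme   # ...exposed as NVMe for the v2 data engine
    export QEMU_EXTRA_DISKS_SIZE=12288     # 12 GiB
    export WITH_CONFIG_PATCH_CONTROLPLANE='@hack/test/patches/longhorn-cp.yaml'
    export WITH_CONFIG_PATCH_WORKER='@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
    sudo -E make e2e-qemu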

.github/workflows/integration-qemu-csi-longhorn-cron.yaml (vendored)

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-21T10:29:14Z by kres 3075de9.
+# Generated on 2025-01-22T17:37:55Z by kres 3075de9.
 
 name: integration-qemu-csi-longhorn-cron
 concurrency:
@@ -112,10 +112,14 @@ jobs:
 EXTRA_TEST_ARGS: -talos.csi=longhorn
 GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
 IMAGE_REGISTRY: registry.dev.siderolabs.io
-QEMU_MEMORY_WORKERS: "3072"
+QEMU_EXTRA_DISKS: "1"
+QEMU_EXTRA_DISKS_DRIVERS: nvme
+QEMU_EXTRA_DISKS_SIZE: "12288"
+QEMU_MEMORY_WORKERS: "4096"
 QEMU_WORKERS: "3"
 SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
+WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/longhorn-cp.yaml'
+WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
 run: |
   sudo -E make e2e-qemu
 - name: save artifacts

.github/workflows/integration-qemu-csi-openebs-cron.yaml (vendored)

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-21T10:29:14Z by kres 3075de9.
+# Generated on 2025-01-22T14:23:24Z by kres 3075de9.
 
 name: integration-qemu-csi-openebs-cron
 concurrency:
@@ -89,7 +89,7 @@ jobs:
 QEMU_MEMORY_WORKERS: "4096"
 QEMU_WORKERS: "3"
 SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH: '@hack/test/patches/openebs-cp.yaml'
+WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/openebs-cp.yaml'
 WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/openebs.yaml'
 run: |
   sudo -E make e2e-qemu

.github/workflows/integration-qemu-csi-rook-ceph-cron.yaml (vendored)

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-21T10:29:14Z by kres 3075de9.
+# Generated on 2025-01-22T14:23:24Z by kres 3075de9.
 
 name: integration-qemu-csi-rook-ceph-cron
 concurrency:
@@ -89,7 +89,7 @@ jobs:
 QEMU_MEMORY_WORKERS: "8192"
 QEMU_WORKERS: "3"
 SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'
+WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/rook-ceph.yaml'
 run: |
   sudo -E make e2e-qemu
 - name: save artifacts

.kres.yaml (12 changes)
@@ -1352,7 +1352,7 @@ spec:
 QEMU_MEMORY_WORKERS: 8192
 QEMU_EXTRA_DISKS: 1
 QEMU_EXTRA_DISKS_SIZE: 12288
-WITH_CONFIG_PATCH: "@hack/test/patches/rook-ceph.yaml"
+WITH_CONFIG_PATCH_CONTROLPLANE: "@hack/test/patches/rook-ceph.yaml"
 EXTRA_TEST_ARGS: -talos.csi=rook-ceph
 IMAGE_REGISTRY: registry.dev.siderolabs.io
 - name: save-fio-benchmark
@@ -1441,8 +1441,12 @@ spec:
 GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
 SHORT_INTEGRATION_TEST: yes
 QEMU_WORKERS: 3
-QEMU_MEMORY_WORKERS: 3072
-WITH_CONFIG_PATCH: "@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml"
+QEMU_MEMORY_WORKERS: 4096
+QEMU_EXTRA_DISKS: 1
+QEMU_EXTRA_DISKS_SIZE: 12288
+QEMU_EXTRA_DISKS_DRIVERS: nvme
+WITH_CONFIG_PATCH_CONTROLPLANE: "@hack/test/patches/longhorn-cp.yaml"
+WITH_CONFIG_PATCH_WORKER: "@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml"
 EXTRA_TEST_ARGS: -talos.csi=longhorn
 IMAGE_REGISTRY: registry.dev.siderolabs.io
 - name: save-fio-benchmark
@@ -1508,7 +1512,7 @@ spec:
 QEMU_MEMORY_WORKERS: 4096
 QEMU_EXTRA_DISKS: 1
 QEMU_EXTRA_DISKS_SIZE: 12288
-WITH_CONFIG_PATCH: "@hack/test/patches/openebs-cp.yaml"
+WITH_CONFIG_PATCH_CONTROLPLANE: "@hack/test/patches/openebs-cp.yaml"
 WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/openebs.yaml"
 EXTRA_TEST_ARGS: -talos.csi=openebs
 IMAGE_REGISTRY: registry.dev.siderolabs.io
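
.kres.yaml is the source of truth for the generated workflow files above; regenerating them (which also bumps the "Generated on" stamps) is assumed to go through the repo's kres tooling, for example:

    make rekres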

hack/test/e2e-qemu.sh

@@ -139,6 +139,18 @@ case "${WITH_ISO:-false}" in
     ;;
 esac
 
+case "${WITH_CONFIG_PATCH_CONTROLPLANE:-false}" in
+  false)
+    ;;
+  *)
+    [[ ! ${WITH_CONFIG_PATCH_CONTROLPLANE} =~ ^@ ]] && echo "WITH_CONFIG_PATCH_CONTROLPLANE variable should start with @" && exit 1
+
+    for i in ${WITH_CONFIG_PATCH_CONTROLPLANE//:/ }; do
+      QEMU_FLAGS+=("--config-patch-control-plane=${i}")
+    done
+    ;;
+esac
+
 case "${WITH_CONFIG_PATCH_WORKER:-false}" in
   false)
     ;;
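
As an illustration of the new block (the second patch path is invented): a colon-separated value expands into one --config-patch-control-plane flag per entry:

    WITH_CONFIG_PATCH_CONTROLPLANE='@hack/test/patches/longhorn-cp.yaml:@extra.yaml'
    QEMU_FLAGS=()
    for i in ${WITH_CONFIG_PATCH_CONTROLPLANE//:/ }; do
      QEMU_FLAGS+=("--config-patch-control-plane=${i}")
    done
    printf '%s\n' "${QEMU_FLAGS[@]}"
    # --config-patch-control-plane=@hack/test/patches/longhorn-cp.yaml
    # --config-patch-control-plane=@extra.yaml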
hack/test/patches/longhorn-cp.yaml (new file, 9 lines)

@@ -0,0 +1,9 @@
+---
+cluster:
+  apiServer:
+    admissionControl:
+      - name: PodSecurity
+        configuration:
+          exemptions:
+            namespaces:
+              - longhorn-system

hack/test/patches/longhorn.yaml

@@ -1,4 +1,12 @@
+---
 machine:
+  sysctls:
+    vm.nr_hugepages: "1024"
+  kernel:
+    modules:
+      - name: nvme_tcp
+      - name: vfio_pci
+      - name: uio_pci_generic
   kubelet:
     extraMounts:
       - destination: /var/lib/longhorn
@@ -8,11 +16,3 @@ machine:
         - bind
         - rshared
         - rw
-cluster:
-  apiServer:
-    admissionControl:
-      - name: PodSecurity
-        configuration:
-          exemptions:
-            namespaces:
-              - longhorn-system
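
The added worker settings follow Longhorn's v2 data engine requirements: the SPDK-based engine needs huge pages plus the nvme_tcp/vfio_pci/uio_pci_generic drivers (the PodSecurity exemption moves to the control-plane-only patch above). A quick way to check the sysctl landed on a node, assuming a running cluster:

    talosctl -n <worker-ip> read /proc/meminfo | grep HugePages_Total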

hack/test/patches/openebs-cp.yaml

@@ -1,10 +1,9 @@
+---
 cluster:
   apiServer:
     admissionControl:
       - name: PodSecurity
         configuration:
-          apiVersion: pod-security.admission.config.k8s.io/v1beta1
-          kind: PodSecurityConfiguration
           exemptions:
             namespaces:
               - openebs

hack/test/patches/openebs.yaml

@@ -1,3 +1,4 @@
+---
 machine:
   sysctls:
     vm.nr_hugepages: "1024"

internal/integration/base/k8s.go

@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/discovery"
@@ -588,7 +589,7 @@ func (k8sSuite *K8sSuite) WaitForResource(ctx context.Context, namespace, group,
 	}
 
 	if !exists {
-		return true, fmt.Errorf("resource %s/%s/%s/%s not found", group, version, kind, resourceName)
+		return true, errors.NewNotFound(mapping.Resource.GroupResource(), resourceName)
 	}
 
 	return false, nil
@@ -657,15 +658,15 @@ func (k8sSuite *K8sSuite) GetUnstructuredResource(ctx context.Context, namespace
 }
 
 // RunFIOTest runs the FIO test with the given storage class and size using kubestr.
-func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClasss, size string) error {
+func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClass, size string) error {
 	args := []string{
 		"--outfile",
-		fmt.Sprintf("/tmp/fio-%s.json", storageClasss),
+		fmt.Sprintf("/tmp/fio-%s.json", storageClass),
 		"--output",
 		"json",
 		"fio",
 		"--storageclass",
-		storageClasss,
+		storageClass,
 		"--size",
 		size,
 	}
@@ -821,6 +822,24 @@ func (k8sSuite *K8sSuite) DeleteManifests(ctx context.Context, manifests []unstr
 	}
 }
 
+// PatchK8sObject patches the kubernetes object with the given namespace, group, kind, version and name.
+func (k8sSuite *K8sSuite) PatchK8sObject(ctx context.Context, namespace, group, kind, version, resourceName string, patchBytes []byte) {
+	patchBytes, err := yaml.ToJSON(patchBytes)
+	k8sSuite.Require().NoError(err, "error converting patch to JSON")
+
+	mapping, err := k8sSuite.Mapper.RESTMapping(schema.GroupKind{
+		Group: group,
+		Kind:  kind,
+	}, version)
+
+	k8sSuite.Require().NoError(err, "error creating mapping for resource %s/%s/%s", group, kind, version)
+
+	dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(namespace)
+
+	_, err = dr.Patch(ctx, resourceName, types.MergePatchType, patchBytes, metav1.PatchOptions{})
+	k8sSuite.Require().NoError(err, "error patching resource %s/%s/%s/%s", group, version, kind, resourceName)
+}
+
 // ToUnstructured converts the given runtime.Object to unstructured.Unstructured.
 func (k8sSuite *K8sSuite) ToUnstructured(obj runtime.Object) unstructured.Unstructured {
 	unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
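
A usage sketch for the new helper, assuming a suite with ctx in scope as in the integration tests; the node name is invented, and the patch body mirrors the longhorn-v2-disk-patch.yaml testdata added below:

    // Merge-patch a Longhorn Node: the YAML patch is converted to JSON
    // by PatchK8sObject and applied via the dynamic client.
    patch := []byte("spec:\n  disks:\n    nvme:\n      allowScheduling: true\n")

    k8sSuite.PatchK8sObject(ctx, "longhorn-system", "longhorn.io", "Node", "v1beta2", "worker-1", patch)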

internal/integration/k8s/constants.go

@@ -13,7 +13,7 @@ const (
 	RookCephHelmChartVersion = "v1.16.0"
 	// LongHornHelmChartVersion is the version of the Longhorn Helm chart to use.
 	// renovate: datasource=helm versioning=helm depName=longhorn registryUrl=https://charts.longhorn.io
-	LongHornHelmChartVersion = "v1.7.2"
+	LongHornHelmChartVersion = "v1.8.0"
 	// OpenEBSChartVersion is the version of the OpenEBS Helm chart to use.
 	// renovate: datasource=helm versioning=helm depName=openebs registryUrl=https://openebs.github.io/openebs
 	OpenEBSChartVersion = "v4.1.2"

internal/integration/k8s/longhorn.go

@@ -28,6 +28,15 @@ var (
 
 	//go:embed testdata/pod-iscsi-volume.yaml
 	podWithISCSIVolumeTemplate []byte
+
+	//go:embed testdata/longhorn-v2-engine-values.yaml
+	longhornEngineV2Values []byte
+
+	//go:embed testdata/longhorn-v2-storageclass.yaml
+	longhornV2StorageClassManifest []byte
+
+	//go:embed testdata/longhorn-v2-disk-patch.yaml
+	longhornNodeDiskPatch []byte
 )
 
 // LongHornSuite tests deploying Longhorn.
@@ -41,8 +50,6 @@ func (suite *LongHornSuite) SuiteName() string {
 }
 
 // TestDeploy tests deploying Longhorn and running a simple test.
-//
-//nolint:gocyclo
 func (suite *LongHornSuite) TestDeploy() {
 	suite.T().Parallel()
 
@@ -69,124 +76,157 @@ func (suite *LongHornSuite) TestDeploy() {
 		LongHornHelmChartVersion,
 		"longhorn",
 		"longhorn",
-		nil,
+		longhornEngineV2Values,
 	); err != nil {
 		suite.T().Fatalf("failed to install Longhorn chart: %v", err)
 	}
 
+	longhornV2StorageClassunstructured := suite.ParseManifests(longhornV2StorageClassManifest)
+
+	suite.T().Cleanup(func() {
+		suite.DeleteManifests(ctx, longhornV2StorageClassunstructured)
+	})
+
+	suite.ApplyManifests(ctx, longhornV2StorageClassunstructured)
+
+	nodes := suite.DiscoverNodeInternalIPsByType(ctx, machine.TypeWorker)
+
+	suite.Require().Equal(3, len(nodes), "expected 3 worker nodes")
+
+	for _, node := range nodes {
+		k8sNode, err := suite.GetK8sNodeByInternalIP(ctx, node)
+		suite.Require().NoError(err)
+
+		suite.Require().NoError(suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Node", "v1beta2", k8sNode.Name, "{.status.diskStatus.*.conditions[?(@.type==\"Ready\")].status}", "True"))
+		suite.Require().NoError(suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Node", "v1beta2", k8sNode.Name, "{.status.diskStatus.*.conditions[?(@.type==\"Schedulable\")].status}", "True"))
+
+		suite.PatchK8sObject(ctx, "longhorn-system", "longhorn.io", "Node", "v1beta2", k8sNode.Name, longhornNodeDiskPatch)
+	}
+
 	suite.T().Run("fio", func(t *testing.T) {
 		t.Parallel()
 
 		suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn", "10G"))
 	})
 
+	suite.T().Run("fio-v2", func(t *testing.T) {
+		t.Parallel()
+
+		suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn-v2", "10G"))
+	})
+
 	suite.T().Run("iscsi", func(t *testing.T) {
 		t.Parallel()
 
-		longHornISCSIVolumeManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeManifest)
-
-		defer func() {
-			cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute)
-			defer cleanupCancel()
-
-			suite.DeleteManifests(cleanUpCtx, longHornISCSIVolumeManifestUnstructured)
-		}()
-
-		suite.ApplyManifests(ctx, longHornISCSIVolumeManifestUnstructured)
-
-		tmpl, err := template.New("longhorn-iscsi-volumeattachment").Parse(string(longHornISCSIVolumeAttachmentManifestTemplate))
-		suite.Require().NoError(err)
-
-		var longHornISCSIVolumeAttachmentManifest bytes.Buffer
-
-		node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
-
-		nodeInfo, err := suite.GetK8sNodeByInternalIP(ctx, node)
-		if err != nil {
-			suite.T().Fatalf("failed to get K8s node by internal IP: %v", err)
-		}
-
-		if err := tmpl.Execute(&longHornISCSIVolumeAttachmentManifest, struct {
-			NodeID string
-		}{
-			NodeID: nodeInfo.Name,
-		}); err != nil {
-			suite.T().Fatalf("failed to render Longhorn ISCSI volume manifest: %v", err)
-		}
-
-		longHornISCSIVolumeAttachmentManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeAttachmentManifest.Bytes())
-
-		suite.ApplyManifests(ctx, longHornISCSIVolumeAttachmentManifestUnstructured)
-
-		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.robustness}", "healthy"); err != nil {
-			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
-		}
-
-		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.state}", "attached"); err != nil {
-			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
-		}
-
-		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0", "{.status.currentState}", "running"); err != nil {
-			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
-		}
-
-		unstructured, err := suite.GetUnstructuredResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0")
-		if err != nil {
-			suite.T().Fatalf("failed to get LongHorn Engine resource: %v", err)
-		}
-
-		var endpointData string
-
-		if status, ok := unstructured.Object["status"].(map[string]interface{}); ok {
-			endpointData, ok = status["endpoint"].(string)
-			if !ok {
-				suite.T().Fatalf("failed to get LongHorn Engine endpoint")
-			}
-		}
-
-		tmpl, err = template.New("pod-iscsi-volume").Parse(string(podWithISCSIVolumeTemplate))
-		suite.Require().NoError(err)
-
-		// endpoint is of the form `iscsi://10.244.0.5:3260/iqn.2019-10.io.longhorn:iscsi/1`
-		// trim the iscsi:// prefix
-		endpointData = strings.TrimPrefix(endpointData, "iscsi://")
-		// trim the /1 suffix
-		endpointData = strings.TrimSuffix(endpointData, "/1")
-
-		targetPortal, IQN, ok := strings.Cut(endpointData, "/")
-		if !ok {
-			suite.T().Fatalf("failed to parse endpoint data from %s", endpointData)
-		}
-
-		var podWithISCSIVolume bytes.Buffer
-
-		if err := tmpl.Execute(&podWithISCSIVolume, struct {
-			NodeName     string
-			TargetPortal string
-			IQN          string
-		}{
-			NodeName:     nodeInfo.Name,
-			TargetPortal: targetPortal,
-			IQN:          IQN,
-		}); err != nil {
-			suite.T().Fatalf("failed to render pod with ISCSI volume manifest: %v", err)
-		}
-
-		podWithISCSIVolumeUnstructured := suite.ParseManifests(podWithISCSIVolume.Bytes())
-
-		defer func() {
-			cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
-			defer cleanupCancel()
-
-			suite.DeleteManifests(cleanUpCtx, podWithISCSIVolumeUnstructured)
-		}()
-
-		suite.ApplyManifests(ctx, podWithISCSIVolumeUnstructured)
-
-		suite.Require().NoError(suite.WaitForPodToBeRunning(ctx, 3*time.Minute, "default", "iscsipd"))
+		suite.testDeployISCSI(ctx)
 	})
 }
 
+//nolint:gocyclo
+func (suite *LongHornSuite) testDeployISCSI(ctx context.Context) {
+	longHornISCSIVolumeManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeManifest)
+
+	defer func() {
+		cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute)
+		defer cleanupCancel()
+
+		suite.DeleteManifests(cleanUpCtx, longHornISCSIVolumeManifestUnstructured)
+	}()
+
+	suite.ApplyManifests(ctx, longHornISCSIVolumeManifestUnstructured)
+
+	tmpl, err := template.New("longhorn-iscsi-volumeattachment").Parse(string(longHornISCSIVolumeAttachmentManifestTemplate))
+	suite.Require().NoError(err)
+
+	var longHornISCSIVolumeAttachmentManifest bytes.Buffer
+
+	node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
+
+	nodeInfo, err := suite.GetK8sNodeByInternalIP(ctx, node)
+	if err != nil {
+		suite.T().Fatalf("failed to get K8s node by internal IP: %v", err)
+	}
+
+	if err := tmpl.Execute(&longHornISCSIVolumeAttachmentManifest, struct {
+		NodeID string
+	}{
+		NodeID: nodeInfo.Name,
+	}); err != nil {
+		suite.T().Fatalf("failed to render Longhorn ISCSI volume manifest: %v", err)
+	}
+
+	longHornISCSIVolumeAttachmentManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeAttachmentManifest.Bytes())
+
+	suite.ApplyManifests(ctx, longHornISCSIVolumeAttachmentManifestUnstructured)
+
+	if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.robustness}", "healthy"); err != nil {
+		suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
+	}
+
+	if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.state}", "attached"); err != nil {
+		suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
+	}
+
+	if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0", "{.status.currentState}", "running"); err != nil {
+		suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
+	}
+
+	unstructured, err := suite.GetUnstructuredResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0")
+	if err != nil {
+		suite.T().Fatalf("failed to get LongHorn Engine resource: %v", err)
+	}
+
+	var endpointData string
+
+	if status, ok := unstructured.Object["status"].(map[string]interface{}); ok {
+		endpointData, ok = status["endpoint"].(string)
+		if !ok {
+			suite.T().Fatalf("failed to get LongHorn Engine endpoint")
+		}
+	}
+
+	tmpl, err = template.New("pod-iscsi-volume").Parse(string(podWithISCSIVolumeTemplate))
+	suite.Require().NoError(err)
+
+	// endpoint is of the form `iscsi://10.244.0.5:3260/iqn.2019-10.io.longhorn:iscsi/1`
+	// trim the iscsi:// prefix
+	endpointData = strings.TrimPrefix(endpointData, "iscsi://")
+	// trim the /1 suffix
+	endpointData = strings.TrimSuffix(endpointData, "/1")
+
+	targetPortal, IQN, ok := strings.Cut(endpointData, "/")
+	if !ok {
+		suite.T().Fatalf("failed to parse endpoint data from %s", endpointData)
+	}
+
+	var podWithISCSIVolume bytes.Buffer
+
+	if err := tmpl.Execute(&podWithISCSIVolume, struct {
+		NodeName     string
+		TargetPortal string
+		IQN          string
+	}{
+		NodeName:     nodeInfo.Name,
+		TargetPortal: targetPortal,
+		IQN:          IQN,
+	}); err != nil {
+		suite.T().Fatalf("failed to render pod with ISCSI volume manifest: %v", err)
+	}
+
+	podWithISCSIVolumeUnstructured := suite.ParseManifests(podWithISCSIVolume.Bytes())
+
+	defer func() {
+		cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
+		defer cleanupCancel()
+
+		suite.DeleteManifests(cleanUpCtx, podWithISCSIVolumeUnstructured)
+	}()
+
+	suite.ApplyManifests(ctx, podWithISCSIVolumeUnstructured)
+
+	suite.Require().NoError(suite.WaitForPodToBeRunning(ctx, 3*time.Minute, "default", "iscsipd"))
+}
+
 func init() {
 	allSuites = append(allSuites, new(LongHornSuite))
 }

internal/integration/k8s/testdata/longhorn-v2-disk-patch.yaml (vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
+---
+spec:
+  disks:
+    nvme:
+      allowScheduling: true
+      evictionRequested: false
+      path: /dev/nvme0n1
+      storageReserved: 0
+      tags: []
+      diskType: block
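
The suite applies this as a merge patch to each Longhorn Node object, registering the extra NVMe disk as a block-mode (v2) disk. A roughly equivalent manual step, with the node name assumed:

    kubectl -n longhorn-system patch nodes.longhorn.io worker-1 \
      --type=merge --patch-file internal/integration/k8s/testdata/longhorn-v2-disk-patch.yaml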

internal/integration/k8s/testdata/longhorn-v2-engine-values.yaml (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
+defaultSettings:
+  v2DataEngine: true
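
These chart values switch on Longhorn's v2 data engine; the suite passes them to its Helm install step. A hedged CLI equivalent, assuming the upstream chart repo is configured:

    helm upgrade --install longhorn longhorn/longhorn \
      --namespace longhorn-system --create-namespace \
      --version v1.8.0 -f longhorn-v2-engine-values.yaml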

internal/integration/k8s/testdata/longhorn-v2-storageclass.yaml (vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: longhorn-v2
+provisioner: driver.longhorn.io
+allowVolumeExpansion: true
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
+parameters:
+  numberOfReplicas: "3"
+  staleReplicaTimeout: "2880"
+  fsType: "ext4"
+  dataEngine: "v2"
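
A claim against this class (the fio-v2 subtest exercises it through kubestr) would look like this sketch; the name and size are invented:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: longhorn-v2-example
    spec:
      accessModes:
        - ReadWriteOnce
      storageClassName: longhorn-v2
      resources:
        requests:
          storage: 1Gi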