From 82c9ec158e82efea80daaf76fef9fbd31c3eb823 Mon Sep 17 00:00:00 2001
From: Noel Georgi
Date: Wed, 22 Jan 2025 19:20:31 +0530
Subject: [PATCH] chore(ci): add tests with longhorn v2 engine

Add an integration test for the Longhorn v2 data engine: the Helm
install now enables the v2 engine, the QEMU nodes are provisioned with
an extra NVMe disk which is patched into each worker's Longhorn Node
resource as a block-mode disk, and the FIO benchmark is run against a
dedicated longhorn-v2 storage class.

The worker machine config loads the nvme_tcp, vfio_pci and
uio_pci_generic kernel modules and reserves huge pages, as the v2
engine requires. Config patches are split into control-plane and worker
variants (with WITH_CONFIG_PATCH_CONTROLPLANE support added to
e2e-qemu.sh), and the Longhorn chart is bumped to v1.8.0.

Fixes: #9669

Signed-off-by: Noel Georgi
---
 .github/workflows/ci.yaml                     |  14 +-
 .../integration-qemu-csi-longhorn-cron.yaml   |  10 +-
 .../integration-qemu-csi-openebs-cron.yaml    |   4 +-
 .../integration-qemu-csi-rook-ceph-cron.yaml  |   4 +-
 .kres.yaml                                    |  12 +-
 hack/test/e2e-qemu.sh                         |  12 +
 hack/test/patches/longhorn-cp.yaml            |   9 +
 hack/test/patches/longhorn.yaml               |  16 +-
 hack/test/patches/openebs-cp.yaml             |   3 +-
 hack/test/patches/openebs.yaml                |   1 +
 internal/integration/base/k8s.go              |  27 +-
 internal/integration/k8s/constants.go         |   2 +-
 internal/integration/k8s/longhorn.go          | 248 ++++++++++--------
 .../k8s/testdata/longhorn-v2-disk-patch.yaml  |  10 +
 .../testdata/longhorn-v2-engine-values.yaml   |   2 +
 .../testdata/longhorn-v2-storageclass.yaml    |  13 +
 16 files changed, 252 insertions(+), 135 deletions(-)
 create mode 100644 hack/test/patches/longhorn-cp.yaml
 create mode 100644 internal/integration/k8s/testdata/longhorn-v2-disk-patch.yaml
 create mode 100644 internal/integration/k8s/testdata/longhorn-v2-engine-values.yaml
 create mode 100644 internal/integration/k8s/testdata/longhorn-v2-storageclass.yaml

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 9d63b81dc..a81677fce 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-22T14:28:18Z by kres 3075de9.
+# Generated on 2025-01-22T17:37:55Z by kres 3075de9.
 
 name: default
 concurrency:
@@ -2957,10 +2957,14 @@ jobs:
         EXTRA_TEST_ARGS: -talos.csi=longhorn
         GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
         IMAGE_REGISTRY: registry.dev.siderolabs.io
-        QEMU_MEMORY_WORKERS: "3072"
+        QEMU_EXTRA_DISKS: "1"
+        QEMU_EXTRA_DISKS_DRIVERS: nvme
+        QEMU_EXTRA_DISKS_SIZE: "12288"
+        QEMU_MEMORY_WORKERS: "4096"
         QEMU_WORKERS: "3"
         SHORT_INTEGRATION_TEST: "yes"
-        WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
+        WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/longhorn-cp.yaml'
+        WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
       run: |
         sudo -E make e2e-qemu
     - name: save artifacts
@@ -3067,7 +3071,7 @@ jobs:
         QEMU_MEMORY_WORKERS: "4096"
         QEMU_WORKERS: "3"
         SHORT_INTEGRATION_TEST: "yes"
-        WITH_CONFIG_PATCH: '@hack/test/patches/openebs-cp.yaml'
+        WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/openebs-cp.yaml'
         WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/openebs.yaml'
       run: |
         sudo -E make e2e-qemu
@@ -3175,7 +3179,7 @@ jobs:
         QEMU_MEMORY_WORKERS: "8192"
         QEMU_WORKERS: "3"
         SHORT_INTEGRATION_TEST: "yes"
-        WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'
+        WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/rook-ceph.yaml'
       run: |
         sudo -E make e2e-qemu
     - name: save artifacts
diff --git a/.github/workflows/integration-qemu-csi-longhorn-cron.yaml b/.github/workflows/integration-qemu-csi-longhorn-cron.yaml
index 1078a02d4..836632859 100644
--- a/.github/workflows/integration-qemu-csi-longhorn-cron.yaml
+++ b/.github/workflows/integration-qemu-csi-longhorn-cron.yaml
@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-21T10:29:14Z by kres 3075de9.
+# Generated on 2025-01-22T17:37:55Z by kres 3075de9.
 
 name: integration-qemu-csi-longhorn-cron
 concurrency:
@@ -112,10 +112,14 @@ jobs:
         EXTRA_TEST_ARGS: -talos.csi=longhorn
         GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
         IMAGE_REGISTRY: registry.dev.siderolabs.io
-        QEMU_MEMORY_WORKERS: "3072"
+        QEMU_EXTRA_DISKS: "1"
+        QEMU_EXTRA_DISKS_DRIVERS: nvme
+        QEMU_EXTRA_DISKS_SIZE: "12288"
+        QEMU_MEMORY_WORKERS: "4096"
         QEMU_WORKERS: "3"
         SHORT_INTEGRATION_TEST: "yes"
-        WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
+        WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/longhorn-cp.yaml'
+        WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
       run: |
         sudo -E make e2e-qemu
     - name: save artifacts
diff --git a/.github/workflows/integration-qemu-csi-openebs-cron.yaml b/.github/workflows/integration-qemu-csi-openebs-cron.yaml
index 338209cb7..dc8d4a47d 100644
--- a/.github/workflows/integration-qemu-csi-openebs-cron.yaml
+++ b/.github/workflows/integration-qemu-csi-openebs-cron.yaml
@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-21T10:29:14Z by kres 3075de9.
+# Generated on 2025-01-22T14:23:24Z by kres 3075de9.
 
 name: integration-qemu-csi-openebs-cron
 concurrency:
@@ -89,7 +89,7 @@ jobs:
         QEMU_MEMORY_WORKERS: "4096"
         QEMU_WORKERS: "3"
         SHORT_INTEGRATION_TEST: "yes"
-        WITH_CONFIG_PATCH: '@hack/test/patches/openebs-cp.yaml'
+        WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/openebs-cp.yaml'
         WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/openebs.yaml'
       run: |
         sudo -E make e2e-qemu
diff --git a/.github/workflows/integration-qemu-csi-rook-ceph-cron.yaml b/.github/workflows/integration-qemu-csi-rook-ceph-cron.yaml
index fcee018cf..3b3d8c86b 100644
--- a/.github/workflows/integration-qemu-csi-rook-ceph-cron.yaml
+++ b/.github/workflows/integration-qemu-csi-rook-ceph-cron.yaml
@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2025-01-21T10:29:14Z by kres 3075de9.
+# Generated on 2025-01-22T14:23:24Z by kres 3075de9.
 
 name: integration-qemu-csi-rook-ceph-cron
 concurrency:
@@ -89,7 +89,7 @@ jobs:
         QEMU_MEMORY_WORKERS: "8192"
         QEMU_WORKERS: "3"
         SHORT_INTEGRATION_TEST: "yes"
-        WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'
+        WITH_CONFIG_PATCH_CONTROLPLANE: '@hack/test/patches/rook-ceph.yaml'
       run: |
         sudo -E make e2e-qemu
     - name: save artifacts
diff --git a/.kres.yaml b/.kres.yaml
index 99ecc0faf..94116f41e 100644
--- a/.kres.yaml
+++ b/.kres.yaml
@@ -1352,7 +1352,7 @@ spec:
         QEMU_MEMORY_WORKERS: 8192
         QEMU_EXTRA_DISKS: 1
         QEMU_EXTRA_DISKS_SIZE: 12288
-        WITH_CONFIG_PATCH: "@hack/test/patches/rook-ceph.yaml"
+        WITH_CONFIG_PATCH_CONTROLPLANE: "@hack/test/patches/rook-ceph.yaml"
         EXTRA_TEST_ARGS: -talos.csi=rook-ceph
         IMAGE_REGISTRY: registry.dev.siderolabs.io
     - name: save-fio-benchmark
@@ -1441,8 +1441,12 @@ spec:
         GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
         SHORT_INTEGRATION_TEST: yes
         QEMU_WORKERS: 3
-        QEMU_MEMORY_WORKERS: 3072
-        WITH_CONFIG_PATCH: "@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml"
+        QEMU_MEMORY_WORKERS: 4096
+        QEMU_EXTRA_DISKS: 1
+        QEMU_EXTRA_DISKS_SIZE: 12288
+        QEMU_EXTRA_DISKS_DRIVERS: nvme
+        WITH_CONFIG_PATCH_CONTROLPLANE: "@hack/test/patches/longhorn-cp.yaml"
+        WITH_CONFIG_PATCH_WORKER: "@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml"
         EXTRA_TEST_ARGS: -talos.csi=longhorn
         IMAGE_REGISTRY: registry.dev.siderolabs.io
     - name: save-fio-benchmark
@@ -1508,7 +1512,7 @@ spec:
         QEMU_MEMORY_WORKERS: 4096
         QEMU_EXTRA_DISKS: 1
         QEMU_EXTRA_DISKS_SIZE: 12288
-        WITH_CONFIG_PATCH: "@hack/test/patches/openebs-cp.yaml"
+        WITH_CONFIG_PATCH_CONTROLPLANE: "@hack/test/patches/openebs-cp.yaml"
         WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/openebs.yaml"
         EXTRA_TEST_ARGS: -talos.csi=openebs
         IMAGE_REGISTRY: registry.dev.siderolabs.io
diff --git a/hack/test/e2e-qemu.sh b/hack/test/e2e-qemu.sh
index 2a73c09bc..3b52dec94 100755
--- a/hack/test/e2e-qemu.sh
+++ b/hack/test/e2e-qemu.sh
@@ -139,6 +139,18 @@ case "${WITH_ISO:-false}" in
     ;;
 esac
 
+case "${WITH_CONFIG_PATCH_CONTROLPLANE:-false}" in
+  false)
+    ;;
+  *)
+    [[ ! ${WITH_CONFIG_PATCH_CONTROLPLANE} =~ ^@ ]] && echo "WITH_CONFIG_PATCH_CONTROLPLANE variable should start with @" && exit 1
+
+    for i in ${WITH_CONFIG_PATCH_CONTROLPLANE//:/ }; do
+      QEMU_FLAGS+=("--config-patch-control-plane=${i}")
+    done
+    ;;
+esac
+
 case "${WITH_CONFIG_PATCH_WORKER:-false}" in
   false)
     ;;
diff --git a/hack/test/patches/longhorn-cp.yaml b/hack/test/patches/longhorn-cp.yaml
new file mode 100644
index 000000000..2416aa0f5
--- /dev/null
+++ b/hack/test/patches/longhorn-cp.yaml
@@ -0,0 +1,9 @@
+---
+cluster:
+  apiServer:
+    admissionControl:
+      - name: PodSecurity
+        configuration:
+          exemptions:
+            namespaces:
+              - longhorn-system
diff --git a/hack/test/patches/longhorn.yaml b/hack/test/patches/longhorn.yaml
index 9e2a89149..6640ba6d1 100644
--- a/hack/test/patches/longhorn.yaml
+++ b/hack/test/patches/longhorn.yaml
@@ -1,4 +1,12 @@
+---
 machine:
+  sysctls:
+    vm.nr_hugepages: "1024"
+  kernel:
+    modules:
+      - name: nvme_tcp
+      - name: vfio_pci
+      - name: uio_pci_generic
   kubelet:
     extraMounts:
       - destination: /var/lib/longhorn
@@ -8,11 +16,3 @@ machine:
         - bind
         - rshared
         - rw
-cluster:
-  apiServer:
-    admissionControl:
-      - name: PodSecurity
-        configuration:
-          exemptions:
-            namespaces:
-              - longhorn-system
diff --git a/hack/test/patches/openebs-cp.yaml b/hack/test/patches/openebs-cp.yaml
index 34bf1f8ac..eed1286c6 100644
--- a/hack/test/patches/openebs-cp.yaml
+++ b/hack/test/patches/openebs-cp.yaml
@@ -1,10 +1,9 @@
+---
 cluster:
   apiServer:
     admissionControl:
       - name: PodSecurity
         configuration:
-          apiVersion: pod-security.admission.config.k8s.io/v1beta1
-          kind: PodSecurityConfiguration
           exemptions:
             namespaces:
               - openebs
diff --git a/hack/test/patches/openebs.yaml b/hack/test/patches/openebs.yaml
index 7a45f7445..339d25fdb 100644
--- a/hack/test/patches/openebs.yaml
+++ b/hack/test/patches/openebs.yaml
@@ -1,3 +1,4 @@
+---
 machine:
   sysctls:
     vm.nr_hugepages: "1024"
diff --git a/internal/integration/base/k8s.go b/internal/integration/base/k8s.go
index eaaccb4ae..f557b0759 100644
--- a/internal/integration/base/k8s.go
+++ b/internal/integration/base/k8s.go
@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/discovery"
@@ -588,7 +589,7 @@ func (k8sSuite *K8sSuite) WaitForResource(ctx context.Context, namespace, group,
 		}
 
 		if !exists {
-			return true, fmt.Errorf("resource %s/%s/%s/%s not found", group, version, kind, resourceName)
+			return true, errors.NewNotFound(mapping.Resource.GroupResource(), resourceName)
 		}
 
 		return false, nil
@@ -657,15 +658,15 @@ func (k8sSuite *K8sSuite) GetUnstructuredResource(ctx context.Context, namespace
 }
 
 // RunFIOTest runs the FIO test with the given storage class and size using kubestr.
-func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClasss, size string) error {
+func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClass, size string) error {
 	args := []string{
 		"--outfile",
-		fmt.Sprintf("/tmp/fio-%s.json", storageClasss),
+		fmt.Sprintf("/tmp/fio-%s.json", storageClass),
 		"--output",
 		"json",
 		"fio",
 		"--storageclass",
-		storageClasss,
+		storageClass,
 		"--size",
 		size,
 	}
@@ -821,6 +822,24 @@ func (k8sSuite *K8sSuite) DeleteManifests(ctx context.Context, manifests []unstr
 	}
 }
 
+// PatchK8sObject patches the Kubernetes object with the given namespace, group, kind, version, and name.
+func (k8sSuite *K8sSuite) PatchK8sObject(ctx context.Context, namespace, group, kind, version, resourceName string, patchBytes []byte) {
+	patchBytes, err := yaml.ToJSON(patchBytes)
+	k8sSuite.Require().NoError(err, "error converting patch to JSON")
+
+	mapping, err := k8sSuite.Mapper.RESTMapping(schema.GroupKind{
+		Group: group,
+		Kind:  kind,
+	}, version)
+
+	k8sSuite.Require().NoError(err, "error creating mapping for resource %s/%s/%s", group, kind, version)
+
+	dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(namespace)
+
+	_, err = dr.Patch(ctx, resourceName, types.MergePatchType, patchBytes, metav1.PatchOptions{})
+	k8sSuite.Require().NoError(err, "error patching resource %s/%s/%s/%s", group, version, kind, resourceName)
+}
+
 // ToUnstructured converts the given runtime.Object to unstructured.Unstructured.
 func (k8sSuite *K8sSuite) ToUnstructured(obj runtime.Object) unstructured.Unstructured {
 	unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
diff --git a/internal/integration/k8s/constants.go b/internal/integration/k8s/constants.go
index b335cd328..8fd0fb798 100644
--- a/internal/integration/k8s/constants.go
+++ b/internal/integration/k8s/constants.go
@@ -13,7 +13,7 @@ const (
 	RookCephHelmChartVersion = "v1.16.0"
 	// LongHornHelmChartVersion is the version of the Longhorn Helm chart to use.
 	// renovate: datasource=helm versioning=helm depName=longhorn registryUrl=https://charts.longhorn.io
-	LongHornHelmChartVersion = "v1.7.2"
+	LongHornHelmChartVersion = "v1.8.0"
 	// OpenEBSChartVersion is the version of the OpenEBS Helm chart to use.
 	// renovate: datasource=helm versioning=helm depName=openebs registryUrl=https://openebs.github.io/openebs
 	OpenEBSChartVersion = "v4.1.2"
diff --git a/internal/integration/k8s/longhorn.go b/internal/integration/k8s/longhorn.go
index fd41d4691..6b125f385 100644
--- a/internal/integration/k8s/longhorn.go
+++ b/internal/integration/k8s/longhorn.go
@@ -28,6 +28,15 @@ var (
 
 	//go:embed testdata/pod-iscsi-volume.yaml
 	podWithISCSIVolumeTemplate []byte
+
+	//go:embed testdata/longhorn-v2-engine-values.yaml
+	longhornEngineV2Values []byte
+
+	//go:embed testdata/longhorn-v2-storageclass.yaml
+	longhornV2StorageClassManifest []byte
+
+	//go:embed testdata/longhorn-v2-disk-patch.yaml
+	longhornNodeDiskPatch []byte
 )
 
 // LongHornSuite tests deploying Longhorn.
@@ -41,8 +50,6 @@ func (suite *LongHornSuite) SuiteName() string {
 }
 
 // TestDeploy tests deploying Longhorn and running a simple test.
-
-//nolint:gocyclo
 func (suite *LongHornSuite) TestDeploy() {
 	suite.T().Parallel()
 
@@ -69,124 +76,157 @@ func (suite *LongHornSuite) TestDeploy() {
 		LongHornHelmChartVersion,
 		"longhorn",
 		"longhorn",
-		nil,
+		longhornEngineV2Values,
 	); err != nil {
 		suite.T().Fatalf("failed to install Longhorn chart: %v", err)
 	}
 
+	longhornV2StorageClassUnstructured := suite.ParseManifests(longhornV2StorageClassManifest)
+
+	suite.T().Cleanup(func() {
+		suite.DeleteManifests(ctx, longhornV2StorageClassUnstructured)
+	})
+
+	suite.ApplyManifests(ctx, longhornV2StorageClassUnstructured)
+
+	nodes := suite.DiscoverNodeInternalIPsByType(ctx, machine.TypeWorker)
+
+	suite.Require().Equal(3, len(nodes), "expected 3 worker nodes")
+
+	for _, node := range nodes {
+		k8sNode, err := suite.GetK8sNodeByInternalIP(ctx, node)
+		suite.Require().NoError(err)
+
+		suite.Require().NoError(suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Node", "v1beta2", k8sNode.Name, "{.status.diskStatus.*.conditions[?(@.type==\"Ready\")].status}", "True"))
+		suite.Require().NoError(suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Node", "v1beta2", k8sNode.Name, "{.status.diskStatus.*.conditions[?(@.type==\"Schedulable\")].status}", "True"))
+
+		suite.PatchK8sObject(ctx, "longhorn-system", "longhorn.io", "Node", "v1beta2", k8sNode.Name, longhornNodeDiskPatch)
+	}
+
 	suite.T().Run("fio", func(t *testing.T) {
 		t.Parallel()
 
 		suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn", "10G"))
 	})
 
+	suite.T().Run("fio-v2", func(t *testing.T) {
+		t.Parallel()
+
+		suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn-v2", "10G"))
+	})
+
 	suite.T().Run("iscsi", func(t *testing.T) {
 		t.Parallel()
 
-		longHornISCSIVolumeManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeManifest)
-
-		defer func() {
-			cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute)
-			defer cleanupCancel()
-
-			suite.DeleteManifests(cleanUpCtx, longHornISCSIVolumeManifestUnstructured)
-		}()
-
-		suite.ApplyManifests(ctx, longHornISCSIVolumeManifestUnstructured)
-
-		tmpl, err := template.New("longhorn-iscsi-volumeattachment").Parse(string(longHornISCSIVolumeAttachmentManifestTemplate))
-		suite.Require().NoError(err)
-
-		var longHornISCSIVolumeAttachmentManifest bytes.Buffer
-
-		node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
-
-		nodeInfo, err := suite.GetK8sNodeByInternalIP(ctx, node)
-		if err != nil {
-			suite.T().Fatalf("failed to get K8s node by internal IP: %v", err)
-		}
-
-		if err := tmpl.Execute(&longHornISCSIVolumeAttachmentManifest, struct {
-			NodeID string
-		}{
-			NodeID: nodeInfo.Name,
-		}); err != nil {
-			suite.T().Fatalf("failed to render Longhorn ISCSI volume manifest: %v", err)
-		}
-
-		longHornISCSIVolumeAttachmentManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeAttachmentManifest.Bytes())
-
-		suite.ApplyManifests(ctx, longHornISCSIVolumeAttachmentManifestUnstructured)
-
-		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.robustness}", "healthy"); err != nil {
-			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
-		}
-
-		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.state}", "attached"); err != nil {
-			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
-		}
-
-		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0", "{.status.currentState}", "running"); err != nil {
-			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
-		}
-
-		unstructured, err := suite.GetUnstructuredResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0")
-		if err != nil {
-			suite.T().Fatalf("failed to get LongHorn Engine resource: %v", err)
-		}
-
-		var endpointData string
-
-		if status, ok := unstructured.Object["status"].(map[string]interface{}); ok {
-			endpointData, ok = status["endpoint"].(string)
-			if !ok {
-				suite.T().Fatalf("failed to get LongHorn Engine endpoint")
-			}
-		}
-
-		tmpl, err = template.New("pod-iscsi-volume").Parse(string(podWithISCSIVolumeTemplate))
-		suite.Require().NoError(err)
-
-		// endpoint is of the form `iscsi://10.244.0.5:3260/iqn.2019-10.io.longhorn:iscsi/1`
-		// trim the iscsi:// prefix
-		endpointData = strings.TrimPrefix(endpointData, "iscsi://")
-		// trim the /1 suffix
-		endpointData = strings.TrimSuffix(endpointData, "/1")
-
-		targetPortal, IQN, ok := strings.Cut(endpointData, "/")
-		if !ok {
-			suite.T().Fatalf("failed to parse endpoint data from %s", endpointData)
-		}
-
-		var podWithISCSIVolume bytes.Buffer
-
-		if err := tmpl.Execute(&podWithISCSIVolume, struct {
-			NodeName     string
-			TargetPortal string
-			IQN          string
-		}{
-			NodeName:     nodeInfo.Name,
-			TargetPortal: targetPortal,
-			IQN:          IQN,
-		}); err != nil {
-			suite.T().Fatalf("failed to render pod with ISCSI volume manifest: %v", err)
-		}
-
-		podWithISCSIVolumeUnstructured := suite.ParseManifests(podWithISCSIVolume.Bytes())
-
-		defer func() {
-			cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
-			defer cleanupCancel()
-
-			suite.DeleteManifests(cleanUpCtx, podWithISCSIVolumeUnstructured)
-		}()
-
-		suite.ApplyManifests(ctx, podWithISCSIVolumeUnstructured)
-
-		suite.Require().NoError(suite.WaitForPodToBeRunning(ctx, 3*time.Minute, "default", "iscsipd"))
+		suite.testDeployISCSI(ctx)
 	})
 }
 
+//nolint:gocyclo
+func (suite *LongHornSuite) testDeployISCSI(ctx context.Context) {
+	longHornISCSIVolumeManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeManifest)
+
+	defer func() {
+		cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute)
+		defer cleanupCancel()
+
+		suite.DeleteManifests(cleanUpCtx, longHornISCSIVolumeManifestUnstructured)
+	}()
+
+	suite.ApplyManifests(ctx, longHornISCSIVolumeManifestUnstructured)
+
+	tmpl, err := template.New("longhorn-iscsi-volumeattachment").Parse(string(longHornISCSIVolumeAttachmentManifestTemplate))
+	suite.Require().NoError(err)
+
+	var longHornISCSIVolumeAttachmentManifest bytes.Buffer
+
+	node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
+
+	nodeInfo, err := suite.GetK8sNodeByInternalIP(ctx, node)
+	if err != nil {
+		suite.T().Fatalf("failed to get K8s node by internal IP: %v", err)
+	}
+
+	if err := tmpl.Execute(&longHornISCSIVolumeAttachmentManifest, struct {
+		NodeID string
+	}{
+		NodeID: nodeInfo.Name,
+	}); err != nil {
+		suite.T().Fatalf("failed to render Longhorn ISCSI volume manifest: %v", err)
+	}
+
+	longHornISCSIVolumeAttachmentManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeAttachmentManifest.Bytes())
+
+	suite.ApplyManifests(ctx, longHornISCSIVolumeAttachmentManifestUnstructured)
+
+	if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.robustness}", "healthy"); err != nil {
+		suite.T().Fatalf("failed to wait for LongHorn Volume to be healthy: %v", err)
+	}
+
+	if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.state}", "attached"); err != nil {
+		suite.T().Fatalf("failed to wait for LongHorn Volume to be attached: %v", err)
+	}
+
+	if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0", "{.status.currentState}", "running"); err != nil {
+		suite.T().Fatalf("failed to wait for LongHorn Engine to be running: %v", err)
+	}
+
+	unstructured, err := suite.GetUnstructuredResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0")
+	if err != nil {
+		suite.T().Fatalf("failed to get LongHorn Engine resource: %v", err)
+	}
+
+	var endpointData string
+
+	if status, ok := unstructured.Object["status"].(map[string]interface{}); ok {
+		endpointData, ok = status["endpoint"].(string)
+		if !ok {
+			suite.T().Fatalf("failed to get LongHorn Engine endpoint")
+		}
+	}
+
+	tmpl, err = template.New("pod-iscsi-volume").Parse(string(podWithISCSIVolumeTemplate))
+	suite.Require().NoError(err)
+
+	// endpoint is of the form `iscsi://10.244.0.5:3260/iqn.2019-10.io.longhorn:iscsi/1`
+	// trim the iscsi:// prefix
+	endpointData = strings.TrimPrefix(endpointData, "iscsi://")
+	// trim the /1 suffix
+	endpointData = strings.TrimSuffix(endpointData, "/1")
+
+	targetPortal, IQN, ok := strings.Cut(endpointData, "/")
+	if !ok {
+		suite.T().Fatalf("failed to parse endpoint data from %s", endpointData)
+	}
+
+	var podWithISCSIVolume bytes.Buffer
+
+	if err := tmpl.Execute(&podWithISCSIVolume, struct {
+		NodeName     string
+		TargetPortal string
+		IQN          string
+	}{
+		NodeName:     nodeInfo.Name,
+		TargetPortal: targetPortal,
+		IQN:          IQN,
+	}); err != nil {
+		suite.T().Fatalf("failed to render pod with ISCSI volume manifest: %v", err)
+	}
+
+	podWithISCSIVolumeUnstructured := suite.ParseManifests(podWithISCSIVolume.Bytes())
+
+	defer func() {
+		cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
+		defer cleanupCancel()
+
+		suite.DeleteManifests(cleanUpCtx, podWithISCSIVolumeUnstructured)
+	}()
+
+	suite.ApplyManifests(ctx, podWithISCSIVolumeUnstructured)
+
+	suite.Require().NoError(suite.WaitForPodToBeRunning(ctx, 3*time.Minute, "default", "iscsipd"))
+}
+
 func init() {
 	allSuites = append(allSuites, new(LongHornSuite))
 }
diff --git a/internal/integration/k8s/testdata/longhorn-v2-disk-patch.yaml b/internal/integration/k8s/testdata/longhorn-v2-disk-patch.yaml
new file mode 100644
index 000000000..9692fee9d
--- /dev/null
+++ b/internal/integration/k8s/testdata/longhorn-v2-disk-patch.yaml
@@ -0,0 +1,10 @@
+---
+spec:
+  disks:
+    nvme:
+      allowScheduling: true
+      evictionRequested: false
+      path: /dev/nvme0n1
+      storageReserved: 0
+      tags: []
+      diskType: block
diff --git a/internal/integration/k8s/testdata/longhorn-v2-engine-values.yaml b/internal/integration/k8s/testdata/longhorn-v2-engine-values.yaml
new file mode 100644
index 000000000..9393755b4
--- /dev/null
+++ b/internal/integration/k8s/testdata/longhorn-v2-engine-values.yaml
@@ -0,0 +1,2 @@
+defaultSettings:
+  v2DataEngine: true
diff --git a/internal/integration/k8s/testdata/longhorn-v2-storageclass.yaml b/internal/integration/k8s/testdata/longhorn-v2-storageclass.yaml
new file mode 100644
index 000000000..bb890cf1b
--- /dev/null
+++ b/internal/integration/k8s/testdata/longhorn-v2-storageclass.yaml
@@ -0,0 +1,13 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: longhorn-v2
+provisioner: driver.longhorn.io
+allowVolumeExpansion: true
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
+parameters:
+  numberOfReplicas: "3"
+  staleReplicaTimeout: "2880"
+  fsType: "ext4"
+  dataEngine: "v2"
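-- 
The new job can also be exercised outside CI by exporting the same
environment the workflow sets and invoking the e2e target directly. A
minimal sketch, assuming a Talos checkout where the QEMU prerequisites
for "make e2e-qemu" are in place and the _out patches
(installer-extensions-patch.yaml, kubelet-fat-patch.yaml) have been
produced by the earlier build steps; GITHUB_STEP_NAME and IMAGE_REGISTRY
are CI-specific and omitted here:

  # Values copied from the e2e-qemu-csi-longhorn job definition above.
  export SHORT_INTEGRATION_TEST=yes
  export QEMU_WORKERS=3
  export QEMU_MEMORY_WORKERS=4096
  export QEMU_EXTRA_DISKS=1             # one extra disk per node
  export QEMU_EXTRA_DISKS_SIZE=12288    # size as set in the job definition
  export QEMU_EXTRA_DISKS_DRIVERS=nvme  # surfaces as /dev/nvme0n1 (see longhorn-v2-disk-patch.yaml)
  export WITH_CONFIG_PATCH_CONTROLPLANE='@hack/test/patches/longhorn-cp.yaml'
  export WITH_CONFIG_PATCH_WORKER='@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
  export EXTRA_TEST_ARGS='-talos.csi=longhorn'
  sudo -E make e2e-qemu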