chore: better lvm2 tests

Use LVM2 tests that rely on module loading by lvm.
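
The test now creates a RAID1-mirrored logical volume, which is what exercises
the dm-raid kernel module; the module is made available on workers via a new
machine config patch. A sketch of the command the test drives through a
privileged pod (assuming the vg0 volume group has already been built from the
user disks, as earlier in the test):

    # create a 2-way RAID1 LV (one extra mirror); activating it requires dm-raid
    nsenter --mount=/proc/1/ns/mnt -- \
      lvcreate --mirrors=1 --type=raid1 --nosync -n lv0 -L 1G vg0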

Fixes: #9300

Signed-off-by: Noel Georgi <git@frezbo.dev>
Noel Georgi, 2024-09-20 22:03:55 +05:30; committed by Andrey Smirnov
parent 908fd8789c
commit dec653bfe1
13 changed files with 58 additions and 30 deletions


@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-20T00:40:37Z by kres 8be5fa7.
+# Generated on 2024-09-23T13:32:15Z by kres 8be5fa7.
name: default
concurrency:
@@ -1464,7 +1464,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_MEMORY_WORKERS: "4096"
SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml'
+WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
@@ -2774,7 +2774,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
@@ -2988,10 +2988,10 @@ jobs:
env:
EXTRA_TEST_ARGS: -talos.csi=rook-ceph
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_CPUS_WORKERS: "4"
QEMU_CPUS_WORKERS: "6"
QEMU_EXTRA_DISKS: "1"
QEMU_EXTRA_DISKS_SIZE: "12288"
QEMU_MEMORY_WORKERS: "5120"
QEMU_MEMORY_WORKERS: "8192"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'
@@ -3086,10 +3086,10 @@ jobs:
- name: e2e-qemu
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "2"
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
WITH_DISK_ENCRYPTION: "true"
WITH_KUBESPAN: "true"
WITH_VIRTUAL_IP: "true"
@@ -3193,7 +3193,11 @@ jobs:
- name: e2e-qemu-race
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
TAG_SUFFIX: -race
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts


@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-20T00:40:37Z by kres 8be5fa7.
+# Generated on 2024-09-20T17:49:19Z by kres 8be5fa7.
name: integration-extensions-cron
concurrency:
@@ -113,7 +113,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_MEMORY_WORKERS: "4096"
SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml'
+WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts


@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-18T10:36:36Z by kres 8be5fa7.
+# Generated on 2024-09-20T16:31:33Z by kres 8be5fa7.
name: integration-qemu-cron
concurrency:
@@ -84,7 +84,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts


@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-09T13:58:35Z by kres 8be5fa7.
+# Generated on 2024-09-23T13:32:15Z by kres 8be5fa7.
name: integration-qemu-csi-rook-ceph-cron
concurrency:
@@ -82,10 +82,10 @@ jobs:
env:
EXTRA_TEST_ARGS: -talos.csi=rook-ceph
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_CPUS_WORKERS: "4"
QEMU_CPUS_WORKERS: "6"
QEMU_EXTRA_DISKS: "1"
QEMU_EXTRA_DISKS_SIZE: "12288"
QEMU_MEMORY_WORKERS: "5120"
QEMU_MEMORY_WORKERS: "8192"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'


@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-09T13:58:35Z by kres 8be5fa7.
+# Generated on 2024-09-20T17:49:19Z by kres 8be5fa7.
name: integration-qemu-encrypted-vip-cron
concurrency:
@@ -81,10 +81,10 @@ jobs:
- name: e2e-qemu
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "2"
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
WITH_DISK_ENCRYPTION: "true"
WITH_KUBESPAN: "true"
WITH_VIRTUAL_IP: "true"


@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-09T13:58:35Z by kres 8be5fa7.
+# Generated on 2024-09-21T05:02:59Z by kres 8be5fa7.
name: integration-qemu-race-cron
concurrency:
@@ -90,7 +90,11 @@ jobs:
- name: e2e-qemu-race
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
TAG_SUFFIX: -race
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts


@@ -332,7 +332,7 @@ spec:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_SIZE: "10240"
QEMU_EXTRA_DISKS_DRIVERS: "ide,nvme"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml"
- name: save-talos-logs
conditions:
- always
@@ -1017,7 +1017,7 @@ spec:
withSudo: true
environment:
QEMU_MEMORY_WORKERS: 4096
WITH_CONFIG_PATCH_WORKER: "@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml"
WITH_CONFIG_PATCH_WORKER: "@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml:@hack/test/patches/dm-raid-module.yaml"
QEMU_EXTRA_DISKS: 3
SHORT_INTEGRATION_TEST: yes
EXTRA_TEST_ARGS: -talos.extensions.qemu
@@ -1153,10 +1153,10 @@ spec:
WITH_VIRTUAL_IP: true
WITH_KUBESPAN: true
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "2"
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_SIZE: "10240"
QEMU_EXTRA_DISKS_DRIVERS: "ide,nvme"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml"
- name: save-talos-logs
conditions:
- always
@@ -1213,6 +1213,10 @@ spec:
command: e2e-qemu
withSudo: true
environment:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_SIZE: "10240"
QEMU_EXTRA_DISKS_DRIVERS: "ide,nvme"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml"
TAG_SUFFIX: -race
IMAGE_REGISTRY: registry.dev.siderolabs.io
- name: save-talos-logs
@@ -1266,8 +1270,8 @@ spec:
environment:
SHORT_INTEGRATION_TEST: yes
QEMU_WORKERS: 3
-QEMU_CPUS_WORKERS: 4
-QEMU_MEMORY_WORKERS: 5120
+QEMU_CPUS_WORKERS: 6
+QEMU_MEMORY_WORKERS: 8192
QEMU_EXTRA_DISKS: 1
QEMU_EXTRA_DISKS_SIZE: 12288
WITH_CONFIG_PATCH: "@hack/test/patches/rook-ceph.yaml"

hack/test/patches/dm-raid-module.yaml (new file)

@@ -0,0 +1,4 @@
+machine:
+  kernel:
+    modules:
+      - name: dm_raid
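
Patches listed in WITH_CONFIG_PATCH_WORKER are colon-separated, so this new
module patch stacks on top of the existing ones. A hypothetical local run of
the same job, reusing the env values from the workflows above:

    # colon-separated worker config patches, matching the workflow env
    export WITH_CONFIG_PATCH_WORKER='@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
    sudo -E make e2e-qemu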


@@ -42,7 +42,7 @@ func (suite *CommonSuite) TearDownTest() {
// TestVirtioModulesLoaded verifies that the virtio modules are loaded.
func (suite *CommonSuite) TestVirtioModulesLoaded() {
-if suite.Cluster == nil || suite.Cluster.Provisioner() != "qemu" {
+if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU {
suite.T().Skip("skipping virtio test since provisioner is not qemu")
}


@@ -185,6 +185,10 @@ func (suite *VolumesSuite) TestLVMActivation() {
suite.T().Skip("skipping test in short mode.")
}
+if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU {
+suite.T().Skip("skipping test for non-qemu provisioner")
+}
node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
userDisks, err := suite.UserDisks(suite.ctx, node)
@@ -211,7 +215,7 @@ func (suite *VolumesSuite) TestLVMActivation() {
stdout, _, err = podDef.Exec(
suite.ctx,
"nsenter --mount=/proc/1/ns/mnt -- lvcreate -n lv0 -L 1G vg0",
"nsenter --mount=/proc/1/ns/mnt -- lvcreate --mirrors=1 --type=raid1 --nosync -n lv0 -L 1G vg0",
)
suite.Require().NoError(err)
@@ -255,7 +259,9 @@
}, 5*time.Minute,
)
-suite.Require().True(suite.lvmVolumeExists(), "LVM volume group was not activated after reboot")
+suite.Require().Eventually(func() bool {
+return suite.lvmVolumeExists()
+}, 5*time.Second, 1*time.Second, "LVM volume group was not activated after reboot")
}
func (suite *VolumesSuite) lvmVolumeExists() bool {
@@ -275,7 +281,8 @@ func (suite *VolumesSuite) lvmVolumeExists() bool {
}
// we test with creating a volume group with two logical volumes
-return lvmVolumeCount == 2
+// one mirrored and one not, so we expect to see 6 volumes
+return lvmVolumeCount == 6
}
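
The count of six follows from LVM's internal representation of a raid1 LV:
besides lv0 itself, a two-leg mirror carries hidden rimage (data) and rmeta
(metadata) subvolumes per leg, so 1 + 2 + 2 for the mirrored LV plus 1 for the
unmirrored one. A sketch for inspecting this by hand (the subvolume names
assume LVM's standard raid naming):

    # list all LVs in vg0, including hidden raid subvolumes
    nsenter --mount=/proc/1/ns/mnt -- lvs -a vg0
    # expect lv0, [lv0_rimage_0/1], [lv0_rmeta_0/1], plus the plain LV: 6 total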
func init() {


@@ -39,7 +39,7 @@ func (suite *WatchdogSuite) SuiteName() string {
func (suite *WatchdogSuite) SetupTest() {
suite.ctx, suite.ctxCancel = context.WithTimeout(context.Background(), 1*time.Minute)
-if suite.Cluster == nil || suite.Cluster.Provisioner() != "qemu" {
+if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU {
suite.T().Skip("skipping watchdog test since provisioner is not qemu")
}
}


@@ -15,6 +15,11 @@ import (
"github.com/siderolabs/talos/pkg/provision/access"
)
+const (
+// ProvisionerQEMU is the name of the QEMU provisioner.
+ProvisionerQEMU = "qemu"
+)
// TalosSuite defines most common settings for integration test suites.
type TalosSuite struct {
// Endpoint to use to connect, if not set config is used


@@ -35,7 +35,7 @@ func (suite *ApparmorSuite) TestApparmor() {
suite.T().Skip("without full cluster state reaching out to the node IP is not reliable")
}
-if suite.Cluster.Provisioner() != "qemu" {
+if suite.Cluster.Provisioner() != base.ProvisionerQEMU {
suite.T().Skip("skipping apparmor test since provisioner is not qemu")
}