Mirror of https://github.com/siderolabs/talos.git, synced 2026-05-04 12:01:12 +02:00.
fix: zfs extensions test
Make sure we also run the check commands on the same node where we created the pool.

Fixes: #13014

Signed-off-by: Noel Georgi <git@frezbo.dev>
This commit is contained in:
parent
1ef8e630ab
commit
7fa4d39197
7
.github/workflows/ci.yaml
vendored
7
.github/workflows/ci.yaml
vendored
@ -1,6 +1,6 @@
|
||||
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
|
||||
#
|
||||
# Generated on 2026-04-11T06:20:55Z by kres b6d29bf.
|
||||
# Generated on 2026-04-17T16:22:40Z by kres cfee956.
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.head_ref || github.run_id }}
|
||||
@ -4883,10 +4883,11 @@ jobs:
|
||||
EXTRA_TEST_ARGS: -talos.csi=longhorn
|
||||
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
|
||||
IMAGE_REGISTRY: registry.dev.siderolabs.io
|
||||
QEMU_CPUS_WORKERS: "3"
|
||||
QEMU_EXTRA_DISKS: "1"
|
||||
QEMU_EXTRA_DISKS_DRIVERS: nvme
|
||||
QEMU_EXTRA_DISKS_SIZE: "12288"
|
||||
QEMU_MEMORY_WORKERS: "8192"
|
||||
QEMU_MEMORY_WORKERS: "10240"
|
||||
QEMU_SYSTEM_DISK_SIZE: "20480"
|
||||
QEMU_WORKERS: "3"
|
||||
SHORT_INTEGRATION_TEST: "yes"
|
||||
@ -5906,8 +5907,6 @@ jobs:
|
||||
PLATFORM: linux/amd64,linux/arm64
|
||||
run: |
|
||||
make images
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # version: v4.1.1
|
||||
- name: Sign artifacts
|
||||
run: |
|
||||
cosign sign-blob --bundle _out/initramfs-amd64.xz.bundle --yes _out/initramfs-amd64.xz
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
|
||||
#
|
||||
# Generated on 2026-04-09T14:38:32Z by kres b6d29bf.
|
||||
# Generated on 2026-04-17T16:22:40Z by kres cfee956.
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.head_ref || github.run_id }}
|
||||
@ -111,10 +111,11 @@ jobs:
|
||||
EXTRA_TEST_ARGS: -talos.csi=longhorn
|
||||
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
|
||||
IMAGE_REGISTRY: registry.dev.siderolabs.io
|
||||
QEMU_CPUS_WORKERS: "3"
|
||||
QEMU_EXTRA_DISKS: "1"
|
||||
QEMU_EXTRA_DISKS_DRIVERS: nvme
|
||||
QEMU_EXTRA_DISKS_SIZE: "12288"
|
||||
QEMU_MEMORY_WORKERS: "8192"
|
||||
QEMU_MEMORY_WORKERS: "10240"
|
||||
QEMU_SYSTEM_DISK_SIZE: "20480"
|
||||
QEMU_WORKERS: "3"
|
||||
SHORT_INTEGRATION_TEST: "yes"
|
||||
|
||||
@ -2303,7 +2303,8 @@ spec:
|
||||
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
|
||||
SHORT_INTEGRATION_TEST: yes
|
||||
QEMU_WORKERS: 3
|
||||
QEMU_MEMORY_WORKERS: 8192
|
||||
QEMU_MEMORY_WORKERS: 10240
|
||||
QEMU_CPUS_WORKERS: 3
|
||||
QEMU_SYSTEM_DISK_SIZE: 20480
|
||||
QEMU_EXTRA_DISKS: 1
|
||||
QEMU_EXTRA_DISKS_SIZE: 12288
|
||||
|
||||
@ -317,6 +317,11 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsStargz() {
|
||||
func (suite *ExtensionsSuiteQEMU) TestExtensionsMdADM() {
|
||||
node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
|
||||
|
||||
k8sNode, err := suite.GetK8sNodeByInternalIP(suite.ctx, node)
|
||||
suite.Require().NoError(err)
|
||||
|
||||
nodeName := k8sNode.Name
|
||||
|
||||
userDisks := suite.UserDisks(suite.ctx, node)
|
||||
|
||||
suite.Require().GreaterOrEqual(len(userDisks), 2, "expected at least two user disks to be available")
|
||||
@ -326,6 +331,8 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsMdADM() {
|
||||
mdAdmCreatePodDef, err := suite.NewPrivilegedPod("mdadm-create")
|
||||
suite.Require().NoError(err)
|
||||
|
||||
mdAdmCreatePodDef.WithNodeName(nodeName)
|
||||
|
||||
suite.Require().NoError(mdAdmCreatePodDef.Create(suite.ctx, 5*time.Minute))
|
||||
|
||||
defer mdAdmCreatePodDef.Delete(suite.ctx) //nolint:errcheck
|
||||
@ -347,6 +354,8 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsMdADM() {
|
||||
deletePodDef, err := suite.NewPrivilegedPod("mdadm-destroy")
|
||||
suite.Require().NoError(err)
|
||||
|
||||
deletePodDef.WithNodeName(nodeName)
|
||||
|
||||
suite.Require().NoError(deletePodDef.Create(suite.ctx, 5*time.Minute))
|
||||
|
||||
defer deletePodDef.Delete(suite.ctx) //nolint:errcheck
|
||||
@ -403,9 +412,16 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsZFS() {
|
||||
|
||||
suite.Require().NotEmpty(userDisks, "expected at least one user disks to be available")
|
||||
|
||||
k8sNode, err := suite.GetK8sNodeByInternalIP(suite.ctx, node)
|
||||
suite.Require().NoError(err)
|
||||
|
||||
nodeName := k8sNode.Name
|
||||
|
||||
zfsPodDef, err := suite.NewPrivilegedPod("zpool-create")
|
||||
suite.Require().NoError(err)
|
||||
|
||||
zfsPodDef.WithNodeName(nodeName)
|
||||
|
||||
suite.Require().NoError(zfsPodDef.Create(suite.ctx, 5*time.Minute))
|
||||
|
||||
defer zfsPodDef.Delete(suite.ctx) //nolint:errcheck
|
||||
@ -432,6 +448,8 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsZFS() {
|
||||
deletePodDef, err := suite.NewPrivilegedPod("zpool-destroy")
|
||||
suite.Require().NoError(err)
|
||||
|
||||
deletePodDef.WithNodeName(nodeName)
|
||||
|
||||
suite.Require().NoError(deletePodDef.Create(suite.ctx, 5*time.Minute))
|
||||
|
||||
defer deletePodDef.Delete(suite.ctx) //nolint:errcheck
|
||||
@ -451,7 +469,7 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsZFS() {
|
||||
}
|
||||
}()
|
||||
|
||||
suite.Require().True(suite.checkZFSPoolMounted(), "expected zfs pool to be mounted")
|
||||
suite.Require().True(suite.checkZFSPoolMounted(node), "expected zfs pool to be mounted")
|
||||
|
||||
// now we want to reboot the node and make sure the pool is still mounted
|
||||
suite.AssertRebooted(
|
||||
@ -461,12 +479,10 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsZFS() {
|
||||
suite.CleanupFailedPods,
|
||||
)
|
||||
|
||||
suite.Require().True(suite.checkZFSPoolMounted(), "expected zfs pool to be mounted")
|
||||
suite.Require().True(suite.checkZFSPoolMounted(node), "expected zfs pool to be mounted")
|
||||
}
|
||||
|
||||
func (suite *ExtensionsSuiteQEMU) checkZFSPoolMounted() bool {
|
||||
node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
|
||||
|
||||
func (suite *ExtensionsSuiteQEMU) checkZFSPoolMounted(node string) bool {
|
||||
ctx := client.WithNode(suite.ctx, node)
|
||||
|
||||
stream, err := suite.Client.LS(ctx, &machineapi.ListRequest{
|
||||
@ -497,9 +513,18 @@ func (suite *ExtensionsSuiteQEMU) checkZFSPoolMounted() bool {
|
||||
|
||||
// TestExtensionsUtilLinuxTools verifies util-linux-tools are working.
|
||||
func (suite *ExtensionsSuiteQEMU) TestExtensionsUtilLinuxTools() {
|
||||
node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
|
||||
|
||||
k8sNode, err := suite.GetK8sNodeByInternalIP(suite.ctx, node)
|
||||
suite.Require().NoError(err)
|
||||
|
||||
nodeName := k8sNode.Name
|
||||
|
||||
utilLinuxPodDef, err := suite.NewPrivilegedPod("util-linux-tools-test")
|
||||
suite.Require().NoError(err)
|
||||
|
||||
utilLinuxPodDef.WithNodeName(nodeName)
|
||||
|
||||
suite.Require().NoError(utilLinuxPodDef.Create(suite.ctx, 5*time.Minute))
|
||||
|
||||
defer utilLinuxPodDef.Delete(suite.ctx) //nolint:errcheck
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user