mirror of https://github.com/siderolabs/omni.git, synced 2025-08-06 09:37:01 +02:00
test: use go test to build and run Omni integration tests
Some checks failed
default / default (push) Has been cancelled
default / e2e-backups (push) Has been cancelled
default / e2e-forced-removal (push) Has been cancelled
default / e2e-scaling (push) Has been cancelled
default / e2e-short (push) Has been cancelled
default / e2e-short-secureboot (push) Has been cancelled
default / e2e-templates (push) Has been cancelled
default / e2e-upgrades (push) Has been cancelled
default / e2e-workload-proxy (push) Has been cancelled
All test modules were moved under the `integration` tag and now live in the `internal/integration` folder: there is no more `cmd/integration-test` executable.
The new Kres version is able to build the same executable from the tests directory instead.
All Omni-related flags were renamed, for example `--endpoint` -> `--omni.endpoint`.

Two more functional changes:

- Enabled `--test.failfast` for all test runs.
- Removed the finalizers, which were running when a test failed.

Both changes should make it easier to understand a test failure: the Talos node logs won't be cluttered by the finalizer tearing down the cluster.

Fixes: https://github.com/siderolabs/omni/issues/1171

Signed-off-by: Artem Chernyshev <artem.chernyshev@talos-systems.com>
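For illustration, a minimal sketch of the resulting flow. Only the `internal/integration` path, the `integration` build tag, `--test.failfast`, and the `--omni.endpoint` rename are taken from the description above; the output name is assumed, and the endpoint value is the default from the old test runner:

    # compile the test binary from the test package (roughly what `make integration-test` now wraps)
    go test -c -o omni-integration-test -tags integration ./internal/integration

    # run it against a local Omni instance using the renamed flag family
    ./omni-integration-test --omni.endpoint grpc://127.0.0.1:8080 --test.failfast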
This commit is contained in:
parent df5a2b92f9
commit c9c4c8e10d
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-10-31T13:15:39Z by kres 6d3cad4-dirty.
# Generated on 2025-05-30T17:31:23Z by kres 9f64b0d.

*
!frontend/src
@ -28,5 +28,6 @@
!CONTRIBUTING.md
!DEVELOPMENT.md
!README.md
!SECURITY.md
!.markdownlint.json
!.license-header.go.txt
97 .github/workflows/ci.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: default
concurrency:
@ -108,39 +108,15 @@ jobs:
- name: acompat
  run: |
    make acompat
- name: integration-test
- name: make-cookies
  run: |
    make integration-test
- name: lint
  run: |
    make lint
- name: Login to registry
  if: github.event_name != 'pull_request'
  uses: docker/login-action@v3
  with:
    password: ${{ secrets.GITHUB_TOKEN }}
    registry: ghcr.io
    username: ${{ github.repository_owner }}
- name: image-integration-test
  run: |
    make image-integration-test
- name: push-omni-integration-test
  if: github.event_name != 'pull_request'
  env:
    PLATFORM: linux/amd64,linux/arm64
    PUSH: "true"
  run: |
    make image-integration-test
- name: push-omni-integration-test-latest
  if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
  env:
    PLATFORM: linux/amd64,linux/arm64
    PUSH: "true"
  run: |
    make image-integration-test IMAGE_TAG=latest
    make make-cookies
- name: omni
  run: |
    make omni
- name: lint
  run: |
    make lint
- name: Login to registry
  if: github.event_name != 'pull_request'
  uses: docker/login-action@v3
@ -168,13 +144,38 @@ jobs:
- name: omnictl
  run: |
    make omnictl
- name: integration-test
  run: |
    make integration-test
- name: Login to registry
  if: github.event_name != 'pull_request'
  uses: docker/login-action@v3
  with:
    password: ${{ secrets.GITHUB_TOKEN }}
    registry: ghcr.io
    username: ${{ github.repository_owner }}
- name: image-integration-test
  run: |
    make image-integration-test
- name: push-integration-test
  if: github.event_name != 'pull_request'
  env:
    PUSH: "true"
  run: |
    make image-integration-test
- name: push-integration-test-latest
  if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
  env:
    PUSH: "true"
  run: |
    make image-integration-test IMAGE_TAG=latest
- name: run-integration-test
  if: github.event_name == 'pull_request'
  env:
    INTEGRATION_RUN_E2E_TEST: "true"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|Auth/|DefaultCluster/|CLICommands/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|Auth|DefaultCluster|CLICommands)$
    RUN_TALEMU_TESTS: "true"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
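The rewritten patterns rely on how `go test` handles `-test.run`: the expression is split on `/`, and each element is matched against the corresponding level of subtests, so `TestIntegration/Suites/(...)` selects suites registered under the `TestIntegration/Suites` hierarchy. The trailing `$` anchors the suite name, which keeps, for example, `ScaleUpAndDown` from also selecting `ScaleUpAndDownMachineClassBasedMachineSets`. A sketch of a direct invocation (binary name taken from the build targets later in this diff):

    # single quotes keep the shell from interpreting | and $
    ./integration-test --test.run 'TestIntegration/Suites/(CleanState|Auth|DefaultCluster|CLICommands)$'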
@ -297,9 +298,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|EtcdBackupAndRestore
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|EtcdBackupAndRestore)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -376,9 +377,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|ForcedMachineRemoval/|ReplaceControlPlanes/|ConfigPatching/|KubernetesNodeAudit/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|ForcedMachineRemoval|ReplaceControlPlanes|ConfigPatching|KubernetesNodeAudit)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -455,9 +456,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|RollingUpdateParallelism
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|RollingUpdateParallelism)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -534,9 +535,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|TalosImageGeneration/|ImmediateClusterDestruction/|DefaultCluster/|EncryptedCluster/|SinglenodeCluster/|Auth/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|TalosImageGeneration|ImmediateClusterDestruction|DefaultCluster|EncryptedCluster|SinglenodeCluster|Auth)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -614,9 +615,9 @@ jobs:
  env:
    ENABLE_SECUREBOOT: "true"
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|TalosImageGeneration/|ImmediateClusterDestruction/|DefaultCluster/|EncryptedCluster/|SinglenodeCluster/|Auth/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|TalosImageGeneration|ImmediateClusterDestruction|DefaultCluster|EncryptedCluster|SinglenodeCluster|Auth)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -693,9 +694,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|ClusterTemplate/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|ClusterTemplate)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -772,9 +773,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -851,9 +852,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|WorkloadProxy
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|WorkloadProxy)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
6 .github/workflows/e2e-backups-cron.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-backups-cron
concurrency:
@ -61,9 +61,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|EtcdBackupAndRestore
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|EtcdBackupAndRestore)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-forced-removal-cron
concurrency:
@ -61,9 +61,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|ForcedMachineRemoval/|ReplaceControlPlanes/|ConfigPatching/|KubernetesNodeAudit/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|ForcedMachineRemoval|ReplaceControlPlanes|ConfigPatching|KubernetesNodeAudit)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
6 .github/workflows/e2e-scaling-cron.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-scaling-cron
concurrency:
@ -61,9 +61,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|RollingUpdateParallelism
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|RollingUpdateParallelism)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
6 .github/workflows/e2e-short-cron.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-short-cron
concurrency:
@ -61,9 +61,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|TalosImageGeneration/|ImmediateClusterDestruction/|DefaultCluster/|EncryptedCluster/|SinglenodeCluster/|Auth/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|TalosImageGeneration|ImmediateClusterDestruction|DefaultCluster|EncryptedCluster|SinglenodeCluster|Auth)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-short-secureboot-cron
concurrency:
@ -62,9 +62,9 @@ jobs:
  env:
    ENABLE_SECUREBOOT: "true"
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|TalosImageGeneration/|ImmediateClusterDestruction/|DefaultCluster/|EncryptedCluster/|SinglenodeCluster/|Auth/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|TalosImageGeneration|ImmediateClusterDestruction|DefaultCluster|EncryptedCluster|SinglenodeCluster|Auth)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
6 .github/workflows/e2e-templates-cron.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-templates-cron
concurrency:
@ -61,9 +61,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|ClusterTemplate/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|ClusterTemplate)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
6 .github/workflows/e2e-upgrades-cron.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-upgrades-cron
concurrency:
@ -61,9 +61,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-22T17:54:46Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: e2e-workload-proxy-cron
concurrency:
@ -61,9 +61,9 @@ jobs:
- name: run-integration-test
  env:
    INTEGRATION_RUN_E2E_TEST: "false"
    INTEGRATION_TEST_ARGS: --test.run CleanState/|WorkloadProxy
    INTEGRATION_TEST_ARGS: --test.run TestIntegration/Suites/(CleanState|WorkloadProxy)$
    RUN_TALEMU_TESTS: "false"
    TALEMU_TEST_ARGS: --test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/
    TALEMU_TEST_ARGS: --test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$
    WITH_DEBUG: "true"
  run: |
    sudo -E make run-integration-test
2 .github/workflows/helm.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-07T09:19:30Z by kres 5ad3e5f.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

name: helm
concurrency:
2 .github/workflows/slack-notify.yaml vendored
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-11-28T14:39:19Z by kres 232fe63.
# Generated on 2025-05-30T19:29:08Z by kres 9f64b0d-dirty.

name: slack-notify
"on":
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-05T10:42:38Z by kres 1a0156b.
# Generated on 2025-05-30T19:29:08Z by kres 9f64b0d-dirty.

version: "2"
63 .kres.yaml
@ -9,11 +9,36 @@ name: acompat
spec:
  disableImage: true
---
kind: auto.CommandConfig
name: make-cookies
spec:
  disableImage: true
---
kind: auto.Helm
spec:
  enabled: true
  chartDir: deploy/helm/omni
---
kind: auto.IntegrationTests
spec:
  tests:
    - path: internal/integration
      name: integration-test
      enableDockerImage: true
      outputs:
        linux-amd64:
          GOOS: linux
          GOARCH: amd64
        linux-arm64:
          GOOS: linux
          GOARCH: arm64
        darwin-amd64:
          GOOS: darwin
          GOARCH: amd64
        darwin-arm64:
          GOOS: darwin
          GOARCH: arm64
---
kind: common.GHWorkflow
spec:
  customRunners:
@ -187,8 +212,8 @@ spec:
  environment:
    WITH_DEBUG: "true"
    INTEGRATION_RUN_E2E_TEST: "true"
    INTEGRATION_TEST_ARGS: "--test.run CleanState/|Auth/|DefaultCluster/|CLICommands/"
    TALEMU_TEST_ARGS: "--test.run ImmediateClusterDestruction/|EncryptedCluster/|SinglenodeCluster/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/|ClusterTemplate/|ScaleUpAndDownAutoProvisionMachineSets/"
    INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|Auth|DefaultCluster|CLICommands)$"
    TALEMU_TEST_ARGS: "--test.run TestIntegration/Suites/(ImmediateClusterDestruction|EncryptedCluster|SinglenodeCluster|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade|ClusterTemplate|ScaleUpAndDownAutoProvisionMachineSets)$"
    RUN_TALEMU_TESTS: true
  jobs:
    - name: e2e-short-secureboot
@ -200,7 +225,7 @@ spec:
        - integration/e2e-short-secureboot
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|TalosImageGeneration/|ImmediateClusterDestruction/|DefaultCluster/|EncryptedCluster/|SinglenodeCluster/|Auth/"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|TalosImageGeneration|ImmediateClusterDestruction|DefaultCluster|EncryptedCluster|SinglenodeCluster|Auth)$"
        RUN_TALEMU_TESTS: false
        ENABLE_SECUREBOOT: true
    - name: e2e-short
@ -213,7 +238,7 @@ spec:
        - integration/e2e-short
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|TalosImageGeneration/|ImmediateClusterDestruction/|DefaultCluster/|EncryptedCluster/|SinglenodeCluster/|Auth/"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|TalosImageGeneration|ImmediateClusterDestruction|DefaultCluster|EncryptedCluster|SinglenodeCluster|Auth)$"
        RUN_TALEMU_TESTS: false
    - name: e2e-scaling
      crons:
@ -225,7 +250,7 @@ spec:
        - integration/e2e-scaling
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|ScaleUpAndDown/|ScaleUpAndDownMachineClassBasedMachineSets/|RollingUpdateParallelism"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|ScaleUpAndDown|ScaleUpAndDownMachineClassBasedMachineSets|RollingUpdateParallelism)$"
        RUN_TALEMU_TESTS: false
    - name: e2e-forced-removal
      crons:
@ -237,7 +262,7 @@ spec:
        - integration/e2e-forced-removal
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|ForcedMachineRemoval/|ReplaceControlPlanes/|ConfigPatching/|KubernetesNodeAudit/"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|ForcedMachineRemoval|ReplaceControlPlanes|ConfigPatching|KubernetesNodeAudit)$"
        RUN_TALEMU_TESTS: false
    - name: e2e-upgrades
      crons:
@ -249,7 +274,7 @@ spec:
        - integration/e2e-upgrades
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|TalosUpgrades/|KubernetesUpgrades/|MaintenanceUpgrade/"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|TalosUpgrades|KubernetesUpgrades|MaintenanceUpgrade)$"
        RUN_TALEMU_TESTS: false
    - name: e2e-templates
      crons:
@ -261,7 +286,7 @@ spec:
        - integration/e2e-templates
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|ClusterTemplate/"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|ClusterTemplate)$"
        RUN_TALEMU_TESTS: false
    - name: e2e-backups
      crons:
@ -273,7 +298,7 @@ spec:
        - integration/e2e-backups
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|EtcdBackupAndRestore"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|EtcdBackupAndRestore)$"
        RUN_TALEMU_TESTS: false
    - name: e2e-workload-proxy
      crons:
@ -285,7 +310,7 @@ spec:
        - integration/e2e-workload-proxy
      environmentOverride:
        INTEGRATION_RUN_E2E_TEST: "false"
        INTEGRATION_TEST_ARGS: "--test.run CleanState/|WorkloadProxy"
        INTEGRATION_TEST_ARGS: "--test.run TestIntegration/Suites/(CleanState|WorkloadProxy)$"
        RUN_TALEMU_TESTS: false
---
kind: common.Build
@ -348,17 +373,6 @@ spec:
      GOOS: windows
      GOARCH: amd64
---
kind: golang.Build
name: integration-test
spec:
  outputs:
    linux-amd64:
      GOOS: linux
      GOARCH: amd64
    linux-arm64:
      GOOS: linux
      GOARCH: arm64
---
kind: golang.Generate
spec:
  versionPackagePath: internal/version
@ -529,13 +543,6 @@ spec:
    - omni-*
---
kind: common.Image
name: image-integration-test
spec:
  extraEnvironment:
    PLATFORM: linux/amd64,linux/arm64
  imageName: "omni-integration-test"
---
kind: common.Image
name: image-omni
spec:
  extraEnvironment:
68 Dockerfile
@ -2,7 +2,7 @@

# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-20T20:30:25Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

ARG JS_TOOLCHAIN
ARG TOOLCHAIN
@ -29,6 +29,7 @@ COPY ./CHANGELOG.md ./CHANGELOG.md
COPY ./CONTRIBUTING.md ./CONTRIBUTING.md
COPY ./DEVELOPMENT.md ./DEVELOPMENT.md
COPY ./README.md ./README.md
COPY ./SECURITY.md ./SECURITY.md
RUN bunx markdownlint --ignore "CHANGELOG.md" --ignore "**/node_modules/**" --ignore '**/hack/chglog/**' --rules sentences-per-line .

# collects proto specs
@ -337,29 +338,65 @@ ARG SHA
ARG TAG
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg go build ${GO_BUILDFLAGS} -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=acompat -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /acompat-linux-amd64

# builds integration-test-linux-amd64
FROM base AS integration-test-linux-amd64-build
# builds integration-test-darwin-amd64
FROM base AS integration-test-darwin-amd64-build
COPY --from=generate / /
COPY --from=embed-generate / /
WORKDIR /src/cmd/integration-test
WORKDIR /src/internal/integration
ARG GO_BUILDFLAGS
ARG GO_LDFLAGS
ARG VERSION_PKG="internal/version"
ARG SHA
ARG TAG
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg GOARCH=amd64 GOOS=linux go build ${GO_BUILDFLAGS} -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=integration-test -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /integration-test-linux-amd64
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg GOARCH=amd64 GOOS=darwin go test -c -covermode=atomic -tags integration,sidero.debug -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=integration-test -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /integration-test-darwin-amd64

# builds integration-test-darwin-arm64
FROM base AS integration-test-darwin-arm64-build
COPY --from=generate / /
COPY --from=embed-generate / /
WORKDIR /src/internal/integration
ARG GO_BUILDFLAGS
ARG GO_LDFLAGS
ARG VERSION_PKG="internal/version"
ARG SHA
ARG TAG
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg GOARCH=arm64 GOOS=darwin go test -c -covermode=atomic -tags integration,sidero.debug -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=integration-test -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /integration-test-darwin-arm64

# builds integration-test-linux-amd64
FROM base AS integration-test-linux-amd64-build
COPY --from=generate / /
COPY --from=embed-generate / /
WORKDIR /src/internal/integration
ARG GO_BUILDFLAGS
ARG GO_LDFLAGS
ARG VERSION_PKG="internal/version"
ARG SHA
ARG TAG
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg GOARCH=amd64 GOOS=linux go test -c -covermode=atomic -tags integration,sidero.debug -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=integration-test -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /integration-test-linux-amd64

# builds integration-test-linux-arm64
FROM base AS integration-test-linux-arm64-build
COPY --from=generate / /
COPY --from=embed-generate / /
WORKDIR /src/cmd/integration-test
WORKDIR /src/internal/integration
ARG GO_BUILDFLAGS
ARG GO_LDFLAGS
ARG VERSION_PKG="internal/version"
ARG SHA
ARG TAG
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg GOARCH=arm64 GOOS=linux go build ${GO_BUILDFLAGS} -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=integration-test -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /integration-test-linux-arm64
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg GOARCH=arm64 GOOS=linux go test -c -covermode=atomic -tags integration,sidero.debug -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=integration-test -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /integration-test-linux-arm64
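These stages now use `go test -c`, which compiles the test package into a standalone binary without running it; `-covermode=atomic` bakes coverage instrumentation into the binary, and `-tags integration,sidero.debug` pulls in the tagged test sources. A rough local equivalent of the stage above, with the cache mounts and version ldflags omitted:

    GOOS=linux GOARCH=arm64 go test -c -covermode=atomic -tags integration,sidero.debug \
      -o integration-test-linux-arm64 ./internal/integration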
# builds make-cookies-linux-amd64
FROM base AS make-cookies-linux-amd64-build
COPY --from=generate / /
COPY --from=embed-generate / /
WORKDIR /src/cmd/make-cookies
ARG GO_BUILDFLAGS
ARG GO_LDFLAGS
ARG VERSION_PKG="internal/version"
ARG SHA
ARG TAG
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg go build ${GO_BUILDFLAGS} -ldflags "${GO_LDFLAGS} -X ${VERSION_PKG}.Name=make-cookies -X ${VERSION_PKG}.SHA=${SHA} -X ${VERSION_PKG}.Tag=${TAG}" -o /make-cookies-linux-amd64

# builds omni-darwin-amd64
FROM base AS omni-darwin-amd64-build
@ -472,12 +509,21 @@ RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build
FROM scratch AS acompat-linux-amd64
COPY --from=acompat-linux-amd64-build /acompat-linux-amd64 /acompat-linux-amd64

FROM scratch AS integration-test-darwin-amd64
COPY --from=integration-test-darwin-amd64-build /integration-test-darwin-amd64 /integration-test-darwin-amd64

FROM scratch AS integration-test-darwin-arm64
COPY --from=integration-test-darwin-arm64-build /integration-test-darwin-arm64 /integration-test-darwin-arm64

FROM scratch AS integration-test-linux-amd64
COPY --from=integration-test-linux-amd64-build /integration-test-linux-amd64 /integration-test-linux-amd64

FROM scratch AS integration-test-linux-arm64
COPY --from=integration-test-linux-arm64-build /integration-test-linux-arm64 /integration-test-linux-arm64

FROM scratch AS make-cookies-linux-amd64
COPY --from=make-cookies-linux-amd64-build /make-cookies-linux-amd64 /make-cookies-linux-amd64

FROM scratch AS omni-darwin-amd64
COPY --from=omni-darwin-amd64-build /omni-darwin-amd64 /omni-darwin-amd64

@ -513,9 +559,16 @@ COPY --from=acompat-linux-amd64 / /
FROM integration-test-linux-${TARGETARCH} AS integration-test

FROM scratch AS integration-test-all
COPY --from=integration-test-darwin-amd64 / /
COPY --from=integration-test-darwin-arm64 / /
COPY --from=integration-test-linux-amd64 / /
COPY --from=integration-test-linux-arm64 / /

FROM make-cookies-linux-${TARGETARCH} AS make-cookies

FROM scratch AS make-cookies-all
COPY --from=make-cookies-linux-amd64 / /

FROM omni-linux-${TARGETARCH} AS omni

FROM scratch AS omni-all
@ -536,7 +589,6 @@ COPY --from=omnictl-windows-amd64.exe / /
FROM scratch AS image-integration-test
ARG TARGETARCH
COPY --from=integration-test integration-test-linux-${TARGETARCH} /integration-test
COPY --from=integration-test integration-test-linux-${TARGETARCH} /integration-test
COPY --from=image-fhs / /
COPY --from=image-ca-certificates / /
LABEL org.opencontainers.image.source=https://github.com/siderolabs/omni
78 Makefile
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-20T20:30:25Z by kres 9f64b0d.
# Generated on 2025-06-02T21:18:31Z by kres 99b55ad-dirty.

# common variables

@ -148,7 +148,7 @@ else
GO_LDFLAGS += -s
endif

all: unit-tests-frontend lint-eslint frontend unit-tests-client unit-tests acompat integration-test image-integration-test omni image-omni omnictl helm lint
all: unit-tests-frontend lint-eslint frontend unit-tests-client unit-tests acompat make-cookies omni image-omni omnictl helm integration-test image-integration-test lint

$(ARTIFACTS): ## Creates artifacts directory.
	@mkdir -p $(ARTIFACTS)
@ -256,33 +256,15 @@ acompat-linux-amd64: $(ARTIFACTS)/acompat-linux-amd64 ## Builds executable for
.PHONY: acompat
acompat: acompat-linux-amd64 ## Builds executables for acompat.

.PHONY: $(ARTIFACTS)/integration-test-linux-amd64
$(ARTIFACTS)/integration-test-linux-amd64:
	@$(MAKE) local-integration-test-linux-amd64 DEST=$(ARTIFACTS)
.PHONY: $(ARTIFACTS)/make-cookies-linux-amd64
$(ARTIFACTS)/make-cookies-linux-amd64:
	@$(MAKE) local-make-cookies-linux-amd64 DEST=$(ARTIFACTS)

.PHONY: integration-test-linux-amd64
integration-test-linux-amd64: $(ARTIFACTS)/integration-test-linux-amd64 ## Builds executable for integration-test-linux-amd64.
.PHONY: make-cookies-linux-amd64
make-cookies-linux-amd64: $(ARTIFACTS)/make-cookies-linux-amd64 ## Builds executable for make-cookies-linux-amd64.

.PHONY: $(ARTIFACTS)/integration-test-linux-arm64
$(ARTIFACTS)/integration-test-linux-arm64:
	@$(MAKE) local-integration-test-linux-arm64 DEST=$(ARTIFACTS)

.PHONY: integration-test-linux-arm64
integration-test-linux-arm64: $(ARTIFACTS)/integration-test-linux-arm64 ## Builds executable for integration-test-linux-arm64.

.PHONY: integration-test
integration-test: integration-test-linux-amd64 integration-test-linux-arm64 ## Builds executables for integration-test.

.PHONY: lint-markdown
lint-markdown: ## Runs markdownlint.
	@$(MAKE) target-$@

.PHONY: lint
lint: lint-eslint lint-golangci-lint-client lint-gofumpt-client lint-govulncheck-client lint-golangci-lint lint-gofumpt lint-govulncheck lint-markdown ## Run all linters for the project.

.PHONY: image-integration-test
image-integration-test: ## Builds image for omni-integration-test.
	@$(MAKE) registry-$@ IMAGE_NAME="omni-integration-test"
.PHONY: make-cookies
make-cookies: make-cookies-linux-amd64 ## Builds executables for make-cookies.

.PHONY: $(ARTIFACTS)/omni-darwin-amd64
$(ARTIFACTS)/omni-darwin-amd64:
@ -315,6 +297,13 @@ omni-linux-arm64: $(ARTIFACTS)/omni-linux-arm64 ## Builds executable for omni-l
.PHONY: omni
omni: omni-darwin-amd64 omni-darwin-arm64 omni-linux-amd64 omni-linux-arm64 ## Builds executables for omni.

.PHONY: lint-markdown
lint-markdown: ## Runs markdownlint.
	@$(MAKE) target-$@

.PHONY: lint
lint: lint-eslint lint-golangci-lint-client lint-gofumpt-client lint-govulncheck-client lint-golangci-lint lint-gofumpt lint-govulncheck lint-markdown ## Run all linters for the project.

.PHONY: image-omni
image-omni: ## Builds image for omni.
	@$(MAKE) registry-$@ IMAGE_NAME="omni"
@ -366,6 +355,41 @@ helm-release: helm ## Release helm chart
	@helm push $(ARTIFACTS)/omni-*.tgz oci://$(HELMREPO) 2>&1 | tee $(ARTIFACTS)/.digest
	@cosign sign --yes $(COSING_ARGS) $(HELMREPO)/omni@$$(cat $(ARTIFACTS)/.digest | awk -F "[, ]+" '/Digest/{print $$NF}')

.PHONY: $(ARTIFACTS)/integration-test-darwin-amd64
$(ARTIFACTS)/integration-test-darwin-amd64:
	@$(MAKE) local-integration-test-darwin-amd64 DEST=$(ARTIFACTS)

.PHONY: integration-test-darwin-amd64
integration-test-darwin-amd64: $(ARTIFACTS)/integration-test-darwin-amd64 ## Builds executable for integration-test-darwin-amd64.

.PHONY: $(ARTIFACTS)/integration-test-darwin-arm64
$(ARTIFACTS)/integration-test-darwin-arm64:
	@$(MAKE) local-integration-test-darwin-arm64 DEST=$(ARTIFACTS)

.PHONY: integration-test-darwin-arm64
integration-test-darwin-arm64: $(ARTIFACTS)/integration-test-darwin-arm64 ## Builds executable for integration-test-darwin-arm64.

.PHONY: $(ARTIFACTS)/integration-test-linux-amd64
$(ARTIFACTS)/integration-test-linux-amd64:
	@$(MAKE) local-integration-test-linux-amd64 DEST=$(ARTIFACTS)

.PHONY: integration-test-linux-amd64
integration-test-linux-amd64: $(ARTIFACTS)/integration-test-linux-amd64 ## Builds executable for integration-test-linux-amd64.

.PHONY: $(ARTIFACTS)/integration-test-linux-arm64
$(ARTIFACTS)/integration-test-linux-arm64:
	@$(MAKE) local-integration-test-linux-arm64 DEST=$(ARTIFACTS)

.PHONY: integration-test-linux-arm64
integration-test-linux-arm64: $(ARTIFACTS)/integration-test-linux-arm64 ## Builds executable for integration-test-linux-arm64.

.PHONY: integration-test
integration-test: integration-test-darwin-amd64 integration-test-darwin-arm64 integration-test-linux-amd64 integration-test-linux-arm64 ## Builds executables for integration-test.

.PHONY: image-integration-test
image-integration-test: ## Builds image for integration-test.
	@$(MAKE) registry-$@ IMAGE_NAME="integration-test"

.PHONY: dev-server
dev-server:
	hack/dev-server.sh
10 SECURITY.md
@ -1,16 +1,16 @@

# Reporting Security Issues

We appreciate your efforts to disclose your findings responsibly, and will make every effort to acknowledge your contributions.

To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/siderolabs/omni/security/advisories/new) tab.

The Sidero Labs team will send a response indicating the next steps in handling your report. After the initial response to your report, the security team will keep you informed of the progress toward a fix and a full announcement and may request additional information or guidance. The expected response time is within 3 business days, and the fix is expected to be delivered within 30 days.
The Sidero Labs team will send a response indicating the next steps in handling your report.
After the initial response to your report, the security team will keep you informed of the progress toward a fix and a full announcement and may request additional information or guidance.
The expected response time is within 3 business days, and the fix is expected to be delivered within 30 days.

## Supported Releases

The Sidero Labs team will only provide security updates for the two latest minor releases of Omni, unless you have a support contract that specifies otherwise. If you are using an older version of Omni, we recommend upgrading to the latest release.
The Sidero Labs team will only provide security updates for the two latest minor releases of Omni, unless you have a support contract that specifies otherwise.
If you are using an older version of Omni, we recommend upgrading to the latest release.

For example, if the latest release is `v0.49.1`, the supported releases are `v0.48.x` and `v0.49.x`.
@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-05-05T10:42:38Z by kres 1a0156b.
# Generated on 2025-05-30T19:29:08Z by kres 9f64b0d-dirty.

version: "2"
@ -1,20 +0,0 @@
// Copyright (c) 2025 Sidero Labs, Inc.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

// Package main provides the entrypoint for the omni-integration-test binary.
package main

import (
	"os"

	_ "github.com/siderolabs/omni/cmd/acompat" // this package should always be imported first for init->set env to work
	"github.com/siderolabs/omni/cmd/integration-test/pkg"
)

func main() {
	if err := pkg.RootCmd().Execute(); err != nil {
		os.Exit(1)
	}
}
@ -1,232 +0,0 @@
|
||||
// Copyright (c) 2025 Sidero Labs, Inc.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the LICENSE file.
|
||||
|
||||
// Package pkg provides the root command for the omni-integration-test binary.
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mattn/go-shellwords"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/siderolabs/omni/client/pkg/compression"
|
||||
clientconsts "github.com/siderolabs/omni/client/pkg/constants"
|
||||
"github.com/siderolabs/omni/cmd/integration-test/pkg/clientconfig"
|
||||
"github.com/siderolabs/omni/cmd/integration-test/pkg/tests"
|
||||
"github.com/siderolabs/omni/internal/pkg/constants"
|
||||
)
|
||||
|
||||
// rootCmd represents the base command when called without any subcommands.
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "omni-integration-test",
|
||||
Short: "Omni integration test runner.",
|
||||
Long: ``,
|
||||
PersistentPreRunE: func(*cobra.Command, []string) error {
|
||||
return compression.InitConfig(true)
|
||||
},
|
||||
RunE: func(*cobra.Command, []string) error {
|
||||
return withContext(func(ctx context.Context) error {
|
||||
// hacky hack
|
||||
os.Args = append(os.Args[0:1], "-test.v", "-test.parallel", strconv.FormatInt(rootCmdFlags.parallel, 10), "-test.timeout", rootCmdFlags.testsTimeout.String())
|
||||
|
||||
testOptions := tests.Options{
|
||||
RunTestPattern: rootCmdFlags.runTestPattern,
|
||||
|
||||
ExpectedMachines: rootCmdFlags.expectedMachines,
|
||||
CleanupLinks: rootCmdFlags.cleanupLinks,
|
||||
RunStatsCheck: rootCmdFlags.runStatsCheck,
|
||||
SkipExtensionsCheckOnCreate: rootCmdFlags.skipExtensionsCheckOnCreate,
|
||||
|
||||
MachineOptions: rootCmdFlags.machineOptions,
|
||||
AnotherTalosVersion: rootCmdFlags.anotherTalosVersion,
|
||||
AnotherKubernetesVersion: rootCmdFlags.anotherKubernetesVersion,
|
||||
OmnictlPath: rootCmdFlags.omnictlPath,
|
||||
ScalingTimeout: rootCmdFlags.scalingTimeout,
|
||||
OutputDir: rootCmdFlags.outputDir,
|
||||
}
|
||||
|
||||
if rootCmdFlags.provisionConfigFile != "" {
|
||||
f, err := os.Open(rootCmdFlags.provisionConfigFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open provision config file %q: %w", rootCmdFlags.provisionConfigFile, err)
|
||||
}
|
||||
|
||||
decoder := yaml.NewDecoder(f)
|
||||
|
||||
for {
|
||||
var cfg tests.MachineProvisionConfig
|
||||
|
||||
if err = decoder.Decode(&cfg); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
testOptions.ProvisionConfigs = append(testOptions.ProvisionConfigs, cfg)
|
||||
}
|
||||
} else {
|
||||
testOptions.ProvisionConfigs = append(testOptions.ProvisionConfigs,
|
||||
tests.MachineProvisionConfig{
|
||||
MachineCount: rootCmdFlags.provisionMachinesCount,
|
||||
Provider: tests.MachineProviderConfig{
|
||||
ID: rootCmdFlags.infraProvider,
|
||||
Data: rootCmdFlags.providerData,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if rootCmdFlags.restartAMachineScript != "" {
|
||||
parsedScript, err := shellwords.Parse(rootCmdFlags.restartAMachineScript)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing restart a machine script: %w", err)
|
||||
}
|
||||
|
||||
testOptions.RestartAMachineFunc = func(ctx context.Context, uuid string) error {
|
||||
return execCmd(ctx, parsedScript, uuid)
|
||||
}
|
||||
}
|
||||
|
||||
if rootCmdFlags.wipeAMachineScript != "" {
|
||||
parsedScript, err := shellwords.Parse(rootCmdFlags.wipeAMachineScript)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing wipe a machine script: %w", err)
|
||||
}
|
||||
|
||||
testOptions.WipeAMachineFunc = func(ctx context.Context, uuid string) error {
|
||||
return execCmd(ctx, parsedScript, uuid)
|
||||
}
|
||||
}
|
            if rootCmdFlags.freezeAMachineScript != "" {
                parsedScript, err := shellwords.Parse(rootCmdFlags.freezeAMachineScript)
                if err != nil {
                    return fmt.Errorf("error parsing freeze a machine script: %w", err)
                }

                testOptions.FreezeAMachineFunc = func(ctx context.Context, uuid string) error {
                    return execCmd(ctx, parsedScript, uuid)
                }
            }

            u, err := url.Parse(rootCmdFlags.endpoint)
            if err != nil {
                return errors.New("error parsing endpoint")
            }

            if u.Scheme == "grpc" {
                u.Scheme = "http"
            }

            testOptions.HTTPEndpoint = u.String()

            clientConfig := clientconfig.New(rootCmdFlags.endpoint)
            defer clientConfig.Close() //nolint:errcheck

            return tests.Run(ctx, clientConfig, testOptions)
        })
    },
}

func execCmd(ctx context.Context, parsedScript []string, args ...string) error {
    cmd := exec.CommandContext(ctx, parsedScript[0], append(parsedScript[1:], args...)...)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    return cmd.Run()
}

//nolint:govet
var rootCmdFlags struct {
    endpoint       string
    runTestPattern string
    infraProvider  string
    providerData   string

    provisionMachinesCount      int
    expectedMachines            int
    expectedBareMetalMachines   int
    parallel                    int64
    cleanupLinks                bool
    runStatsCheck               bool
    skipExtensionsCheckOnCreate bool

    testsTimeout   time.Duration
    scalingTimeout time.Duration

    restartAMachineScript    string
    wipeAMachineScript       string
    freezeAMachineScript     string
    anotherTalosVersion      string
    anotherKubernetesVersion string
    omnictlPath              string
    provisionConfigFile      string
    outputDir                string

    machineOptions tests.MachineOptions
}

// RootCmd returns the root command.
func RootCmd() *cobra.Command { return onceInit() }

var onceInit = sync.OnceValue(func() *cobra.Command {
    rootCmd.PersistentFlags().StringVar(&rootCmdFlags.endpoint, "endpoint", "grpc://127.0.0.1:8080", "The endpoint of the Omni API.")
    rootCmd.Flags().StringVar(&rootCmdFlags.runTestPattern, "test.run", "", "tests to run (regular expression)")
    rootCmd.Flags().IntVar(&rootCmdFlags.expectedMachines, "expected-machines", 4, "minimum number of machines expected")
    rootCmd.Flags().StringVar(&rootCmdFlags.restartAMachineScript, "restart-a-machine-script", "hack/test/restart-a-vm.sh", "a script to run to restart a machine by UUID (optional)")
    rootCmd.Flags().StringVar(&rootCmdFlags.wipeAMachineScript, "wipe-a-machine-script", "hack/test/wipe-a-vm.sh", "a script to run to wipe a machine by UUID (optional)")
    rootCmd.Flags().StringVar(&rootCmdFlags.freezeAMachineScript, "freeze-a-machine-script", "hack/test/freeze-a-vm.sh", "a script to run to freeze a machine by UUID (optional)")
    rootCmd.Flags().StringVar(&rootCmdFlags.omnictlPath, "omnictl-path", "", "omnictl CLI script path (optional)")
    rootCmd.Flags().StringVar(&rootCmdFlags.anotherTalosVersion, "another-talos-version",
        constants.AnotherTalosVersion,
        "Talos version for upgrade test",
    )
    rootCmd.Flags().StringVar(
        &rootCmdFlags.machineOptions.TalosVersion,
        "talos-version",
        clientconsts.DefaultTalosVersion,
        "installer version for workload clusters",
    )
    rootCmd.Flags().StringVar(&rootCmdFlags.machineOptions.KubernetesVersion, "kubernetes-version", constants.DefaultKubernetesVersion, "Kubernetes version for workload clusters")
    rootCmd.Flags().StringVar(&rootCmdFlags.anotherKubernetesVersion, "another-kubernetes-version", constants.AnotherKubernetesVersion, "Kubernetes version for upgrade tests")
    rootCmd.Flags().Int64VarP(&rootCmdFlags.parallel, "parallel", "p", 4, "tests parallelism")
    rootCmd.Flags().DurationVarP(&rootCmdFlags.testsTimeout, "timeout", "t", time.Hour, "tests global timeout")
    rootCmd.Flags().BoolVar(&rootCmdFlags.cleanupLinks, "cleanup-links", false, "remove all links after the tests are complete")
    rootCmd.Flags().BoolVar(&rootCmdFlags.runStatsCheck, "run-stats-check", false, "runs stats check after the test is complete")
    rootCmd.Flags().IntVar(&rootCmdFlags.provisionMachinesCount, "provision-machines", 0, "provisions machines through the infrastructure provider")
    rootCmd.Flags().StringVar(&rootCmdFlags.infraProvider, "infra-provider", "talemu", "use infra provider with the specified ID when provisioning the machines")
    rootCmd.Flags().StringVar(&rootCmdFlags.providerData, "provider-data", "{}", "the infra provider machine template data to use")
    rootCmd.Flags().DurationVar(&rootCmdFlags.scalingTimeout, "scale-timeout", time.Second*150, "scale up test timeout")
    rootCmd.Flags().StringVar(&rootCmdFlags.provisionConfigFile, "provision-config-file", "", "provision machines with the more complicated configuration")
    rootCmd.Flags().BoolVar(&rootCmdFlags.skipExtensionsCheckOnCreate, "skip-extensions-check-on-create", false,
        "disables checking for hello-world-service extension on the machine allocation and in the upgrade tests")
    rootCmd.Flags().StringVar(&rootCmdFlags.outputDir, "output-dir", "/tmp/integration-test", "output directory for the files generated by the test, e.g., the support bundles")

    rootCmd.MarkFlagsMutuallyExclusive("provision-machines", "provision-config-file")

    return rootCmd
})

// withContext wraps with CLI context.
func withContext(f func(ctx context.Context) error) error {
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
    defer stop()

    return f(ctx)
}
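Taken together, withContext supplies a signal-aware context and execCmd shells out to one of the *-a-machine-script hooks. A minimal sketch of how the two helpers compose (the script path and UUID below are placeholders, not values from this commit):

    // a sketch, not part of the diff: freeze a single machine by UUID
    err := withContext(func(ctx context.Context) error {
        return execCmd(ctx, []string{"hack/test/freeze-a-vm.sh"}, "00000000-0000-0000-0000-000000000000")
    })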
@ -1,365 +0,0 @@
// Copyright (c) 2025 Sidero Labs, Inc.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests

import (
    "context"
    "fmt"
    "slices"
    "testing"
    "time"

    "github.com/cosi-project/runtime/pkg/resource"
    "github.com/cosi-project/runtime/pkg/resource/rtestutils"
    "github.com/cosi-project/runtime/pkg/safe"
    "github.com/cosi-project/runtime/pkg/state"
    "github.com/siderolabs/go-retry/retry"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap"
    "go.uber.org/zap/zaptest"

    "github.com/siderolabs/omni/client/api/omni/specs"
    "github.com/siderolabs/omni/client/pkg/client"
    "github.com/siderolabs/omni/client/pkg/omni/resources"
    "github.com/siderolabs/omni/client/pkg/omni/resources/infra"
    "github.com/siderolabs/omni/client/pkg/omni/resources/omni"
    "github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
)

// AssertMachinesShouldBeProvisioned creates a machine request set and waits until all requests are fulfilled.
//
//nolint:gocognit
func AssertMachinesShouldBeProvisioned(testCtx context.Context, client *client.Client, cfg MachineProvisionConfig, machineRequestSetName,
    talosVersion string,
) TestFunc {
    return func(t *testing.T) {
        ctx, cancel := context.WithTimeout(testCtx, time.Minute*5)
        defer cancel()

        rtestutils.AssertResources(ctx, t, client.Omni().State(), []string{cfg.Provider.ID}, func(*infra.ProviderStatus, *assert.Assertions) {})

        machineRequestSet, err := safe.ReaderGetByID[*omni.MachineRequestSet](ctx, client.Omni().State(), machineRequestSetName)

        if !state.IsNotFoundError(err) {
            require.NoError(t, err)
        }

        if machineRequestSet != nil {
            rtestutils.Destroy[*omni.MachineRequestSet](ctx, t, client.Omni().State(), []string{machineRequestSetName})
        }

        machineRequestSet = omni.NewMachineRequestSet(resources.DefaultNamespace, machineRequestSetName)

        machineRequestSet.TypedSpec().Value.Extensions = []string{
            "siderolabs/" + HelloWorldServiceExtensionName,
        }

        machineRequestSet.TypedSpec().Value.ProviderId = cfg.Provider.ID
        machineRequestSet.TypedSpec().Value.TalosVersion = talosVersion
        machineRequestSet.TypedSpec().Value.ProviderData = cfg.Provider.Data
        machineRequestSet.TypedSpec().Value.MachineCount = int32(cfg.MachineCount)

        require.NoError(t, client.Omni().State().Create(ctx, machineRequestSet))

        var resources safe.List[*infra.MachineRequestStatus]

        err = retry.Constant(time.Second*60).RetryWithContext(ctx, func(ctx context.Context) error {
            resources, err = safe.ReaderListAll[*infra.MachineRequestStatus](ctx, client.Omni().State(),
                state.WithLabelQuery(resource.LabelEqual(omni.LabelMachineRequestSet, machineRequestSetName)),
            )
            if err != nil {
                return err
            }

            if resources.Len() != cfg.MachineCount {
                return retry.ExpectedErrorf("provision machine count is %d, expected %d", resources.Len(), cfg.MachineCount)
            }

            return nil
        })

        require.NoError(t, err)

        err = retry.Constant(time.Minute*5).RetryWithContext(ctx, func(ctx context.Context) error {
            var machines safe.List[*omni.MachineStatus]

            machines, err = safe.ReaderListAll[*omni.MachineStatus](ctx, client.Omni().State())
            if err != nil {
                return err
            }

            if machines.Len() < cfg.MachineCount {
                return retry.ExpectedErrorf("links count is %d, expected at least %d", machines.Len(), cfg.MachineCount)
            }

            for r := range resources.All() {
                requestedMachines := machines.FilterLabelQuery(resource.LabelEqual(omni.LabelMachineRequest, r.Metadata().ID()))

                if requestedMachines.Len() == 0 {
                    return retry.ExpectedErrorf("machine request %q doesn't have the related link", r.Metadata().ID())
                }

                if requestedMachines.Len() != 1 {
                    return fmt.Errorf("more than one machine is labeled with %q machine request label", r.Metadata().ID())
                }

                m := requestedMachines.Get(0)
                if m.TypedSpec().Value.Hardware == nil {
                    return retry.ExpectedErrorf("the machine %q is not fully provisioned", r.Metadata().ID())
                }
            }

            return nil
        })

        require.NoError(t, err)
    }
}

// AssertMachinesShouldBeDeprovisioned removes the machine request set and checks that all related links were deleted.
func AssertMachinesShouldBeDeprovisioned(testCtx context.Context, client *client.Client, machineRequestSetName string) TestFunc {
    return func(t *testing.T) {
        ctx, cancel := context.WithTimeout(testCtx, time.Minute*5)
        defer cancel()

        requestIDs := rtestutils.ResourceIDs[*infra.MachineRequest](ctx, t, client.Omni().State(),
            state.WithLabelQuery(resource.LabelEqual(omni.LabelMachineRequestSet, machineRequestSetName)),
        )

        links, err := safe.ReaderListAll[*siderolink.Link](ctx, client.Omni().State())

        require.NoError(t, err)

        linkIDs := make([]string, 0, len(requestIDs))

        for l := range links.All() {
            mr, ok := l.Metadata().Labels().Get(omni.LabelMachineRequest)
            if !ok {
                continue
            }

            if slices.Index(requestIDs, mr) != -1 {
                linkIDs = append(linkIDs, l.Metadata().ID())
            }
        }

        rtestutils.Destroy[*omni.MachineRequestSet](ctx, t, client.Omni().State(), []string{machineRequestSetName})

        for _, id := range requestIDs {
            rtestutils.AssertNoResource[*infra.MachineRequest](ctx, t, client.Omni().State(), id)
        }

        for _, id := range linkIDs {
            rtestutils.AssertNoResource[*siderolink.Link](ctx, t, client.Omni().State(), id)
        }
    }
}

// AcceptInfraMachines asserts that there are a certain number of machines that are not accepted, provisioned by the static infra provider with the given ID.
//
// It then accepts them all and asserts that the states of various resources are updated as expected.
func AcceptInfraMachines(testCtx context.Context, omniState state.State, infraProviderID string, expectedCount int, disableKexec bool) TestFunc {
    const disableKexecConfigPatch = `machine:
  install:
    extraKernelArgs:
      - kexec_load_disabled=1
  sysctls:
    kernel.kexec_load_disabled: "1"`

    return func(t *testing.T) {
        logger := zaptest.NewLogger(t)

        ctx, cancel := context.WithTimeout(testCtx, time.Minute*10)
        defer cancel()

        linksMap := make(map[string]*siderolink.Link, expectedCount)

        err := retry.Constant(time.Minute*10).RetryWithContext(ctx, func(ctx context.Context) error {
            links, err := safe.ReaderListAll[*siderolink.Link](ctx, omniState)
            if err != nil {
                return err
            }

            discoveredLinks := 0

            for link := range links.All() {
                providerID, ok := link.Metadata().Annotations().Get(omni.LabelInfraProviderID)
                if !ok {
                    continue
                }

                if infraProviderID == providerID {
                    discoveredLinks++
                }

                linksMap[link.Metadata().ID()] = link
            }

            if discoveredLinks != expectedCount {
                return retry.ExpectedErrorf("expected %d static infra provider machines, got %d", expectedCount, discoveredLinks)
            }

            return nil
        })

        require.NoError(t, err)

        // link count should match the expected count
        require.Equal(t, expectedCount, len(linksMap))

        ids := make([]resource.ID, 0, len(linksMap))

        for id := range linksMap {
            ids = append(ids, id)

            rtestutils.AssertResource(ctx, t, omniState, id, func(res *infra.Machine, assertion *assert.Assertions) {
                assertion.Equal(specs.InfraMachineConfigSpec_PENDING, res.TypedSpec().Value.AcceptanceStatus)
            })

            rtestutils.AssertNoResource[*infra.MachineStatus](ctx, t, omniState, id)

            rtestutils.AssertNoResource[*omni.Machine](ctx, t, omniState, id)

            // Accept the machine
            infraMachineConfig := omni.NewInfraMachineConfig(resources.DefaultNamespace, id)

            infraMachineConfig.TypedSpec().Value.AcceptanceStatus = specs.InfraMachineConfigSpec_ACCEPTED

            if disableKexec {
                infraMachineConfig.TypedSpec().Value.ExtraKernelArgs = "kexec_load_disabled=1"
            }

            require.NoError(t, omniState.Create(ctx, infraMachineConfig))

            if disableKexec {
                disableKexecConfigPatchRes := omni.NewConfigPatch(resources.DefaultNamespace, fmt.Sprintf("500-%s-disable-kexec", id))

                disableKexecConfigPatchRes.Metadata().Labels().Set(omni.LabelMachine, id)

                require.NoError(t, disableKexecConfigPatchRes.TypedSpec().Value.SetUncompressedData([]byte(disableKexecConfigPatch)))
                require.NoError(t, omniState.Create(ctx, disableKexecConfigPatchRes))
            }
        }

        logger.Info("accepted machines", zap.Reflect("infra_provider_id", infraProviderID), zap.Strings("machine_ids", ids))

        // Assert that the infra.Machines are now marked as accepted
        rtestutils.AssertResources(ctx, t, omniState, ids, func(res *infra.Machine, assertion *assert.Assertions) {
            assertion.Equal(specs.InfraMachineConfigSpec_ACCEPTED, res.TypedSpec().Value.AcceptanceStatus)
        })

        // Assert that omni.Machine resources are now created and marked as managed by the static infra provider
        rtestutils.AssertResources(ctx, t, omniState, ids, func(res *omni.Machine, assertion *assert.Assertions) {
            _, isManagedByStaticInfraProvider := res.Metadata().Labels().Get(omni.LabelIsManagedByStaticInfraProvider)

            assertion.True(isManagedByStaticInfraProvider)
        })

        // Assert that omni.Machine resources are now created
        rtestutils.AssertResources(ctx, t, omniState, ids, func(res *omni.Machine, assertion *assert.Assertions) {
            _, isManagedByStaticInfraProvider := res.Metadata().Labels().Get(omni.LabelIsManagedByStaticInfraProvider)

            assertion.True(isManagedByStaticInfraProvider)
        })

        // Assert that infra.MachineStatus resources are now created, powered off, marked as ready to use, and the machine labels are set on them
        rtestutils.AssertResources(ctx, t, omniState, ids, func(res *infra.MachineStatus, assertion *assert.Assertions) {
            aVal, _ := res.Metadata().Labels().Get("a")
            assertion.Equal("b", aVal)

            _, cOk := res.Metadata().Labels().Get("c")
            assertion.True(cOk)

            assertion.Equal(specs.InfraMachineStatusSpec_POWER_STATE_OFF, res.TypedSpec().Value.PowerState)
            assertion.True(res.TypedSpec().Value.ReadyToUse)
        })

        // Assert the infra provider labels on MachineStatus resources
        rtestutils.AssertResources(ctx, t, omniState, ids, func(res *omni.MachineStatus, assertion *assert.Assertions) {
            link := linksMap[res.Metadata().ID()]

            infraProviderID, _ := link.Metadata().Annotations().Get(omni.LabelInfraProviderID)

            aLabel := fmt.Sprintf(omni.InfraProviderLabelPrefixFormat, infraProviderID) + "a"
            aVal, _ := res.Metadata().Labels().Get(aLabel)

            assertion.Equal("b", aVal)

            cLabel := fmt.Sprintf(omni.InfraProviderLabelPrefixFormat, infraProviderID) + "c"
            _, cOk := res.Metadata().Labels().Get(cLabel)
            assertion.True(cOk)
        })
    }
}

// AssertInfraMachinesAreAllocated asserts that the machines that belong to the given cluster and managed by a static infra provider
// are marked as allocated in the related resources.
func AssertInfraMachinesAreAllocated(testCtx context.Context, omniState state.State, clusterID, talosVersion string, extensions []string) TestFunc {
    return func(t *testing.T) {
        ctx, cancel := context.WithTimeout(testCtx, time.Minute*10)
        defer cancel()

        nodeList, err := safe.StateListAll[*omni.MachineSetNode](ctx, omniState, state.WithLabelQuery(resource.LabelEqual(omni.LabelCluster, clusterID)))
        require.NoError(t, err)

        require.Greater(t, nodeList.Len(), 0)

        for machineSetNode := range nodeList.All() {
            id := machineSetNode.Metadata().ID()

            // There must be an infra.Machine resource for each node
            rtestutils.AssertResource[*infra.Machine](ctx, t, omniState, id, func(res *infra.Machine, assertion *assert.Assertions) {
                assertion.Equal(talosVersion, res.TypedSpec().Value.ClusterTalosVersion)
                assertion.Empty(res.TypedSpec().Value.WipeId)
                assertion.Equal(extensions, res.TypedSpec().Value.Extensions)
            })

            // The machine is allocated, so it will be powered on and be ready to use
            rtestutils.AssertResource[*infra.MachineStatus](ctx, t, omniState, id, func(res *infra.MachineStatus, assertion *assert.Assertions) {
                assertion.Equal(specs.InfraMachineStatusSpec_POWER_STATE_ON, res.TypedSpec().Value.PowerState)
                assertion.True(res.TypedSpec().Value.ReadyToUse)
                assertion.True(res.TypedSpec().Value.Installed)
            })
        }
    }
}

// DestroyInfraMachines removes siderolink.Link resources for all machines managed by a static infra provider,
// and asserts that the related infra.Machine and infra.MachineStatus resources are deleted.
func DestroyInfraMachines(testCtx context.Context, omniState state.State, providerID string, count int) TestFunc {
    return func(t *testing.T) {
        ctx, cancel := context.WithTimeout(testCtx, time.Minute*10)
        defer cancel()

        links, err := safe.StateListAll[*siderolink.Link](ctx, omniState)
        require.NoError(t, err)

        var deleted int

        for link := range links.All() {
            pid, ok := link.Metadata().Annotations().Get(omni.LabelInfraProviderID)
            if !ok {
                continue
            }

            if pid != providerID {
                continue
            }

            id := link.Metadata().ID()

            rtestutils.Destroy[*siderolink.Link](ctx, t, omniState, []string{id})

            rtestutils.AssertNoResource[*infra.Machine](ctx, t, omniState, id)
            rtestutils.AssertNoResource[*infra.MachineStatus](ctx, t, omniState, id)

            deleted++
        }

        require.EqualValues(t, count, deleted)
    }
}
@ -1,143 +0,0 @@
// Copyright (c) 2025 Sidero Labs, Inc.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests

import (
    "context"
    "errors"
    "fmt"
    "sort"
    "strings"
    "testing"
    "time"

    "github.com/prometheus/client_golang/api"
    v1 "github.com/prometheus/client_golang/api/prometheus/v1"
    "github.com/prometheus/common/model"
    "github.com/siderolabs/go-retry/retry"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// AssertStatsLimits checks that metrics don't show any spikes of resource reads/writes, controller wakeups.
// This test should only be run after the integration tests set with Talemu enabled as the thresholds are adjusted for it.
// Should have Prometheus running on 9090.
func AssertStatsLimits(testCtx context.Context) TestFunc {
    return func(t *testing.T) {
        for _, tt := range []struct {
            check func(assert *assert.Assertions, value float64)
            name  string
            query string
        }{
            {
                name:  "resource CRUD",
                query: `sum(omni_resource_operations_total{operation=~"create|update", type!="MachineStatusLinks.omni.sidero.dev"})`,
                check: func(assert *assert.Assertions, value float64) {
                    limit := float64(12000)

                    assert.Lessf(value, limit, "resource CRUD operations were expected to be less than %f. "+
                        "If the limit is exceeded not because of a leak but because you added some new resources/controllers, adjust the limit accordingly.", limit)
                },
            },
            {
                name:  "queue length",
                query: `sum(omni_runtime_qcontroller_queue_length)`,
                check: func(assert *assert.Assertions, value float64) { assert.Zero(value) },
            },
            {
                name:  "controller wakeups",
                query: `sum(omni_runtime_controller_wakeups{controller!="MachineStatusLinkController"})`,
                check: func(assert *assert.Assertions, value float64) {
                    limit := float64(12000)

                    assert.Lessf(value, limit, "controller wakeups were expected to be less than %f. "+
                        "If the limit is exceeded not because of a leak but because you added some new resources/controllers, adjust the limit accordingly.", limit)
                },
            },
        } {
            t.Run(tt.name, func(t *testing.T) {
                t.Parallel()

                ctx, cancel := context.WithTimeout(testCtx, time.Second*16)
                defer cancel()

                err := retry.Constant(time.Second * 15).Retry(func() error {
                    promClient, err := api.NewClient(api.Config{
                        Address: "http://127.0.0.1:9090",
                    })
                    if err != nil {
                        return retry.ExpectedError(err)
                    }

                    var (
                        value    model.Value
                        warnings v1.Warnings
                    )

                    agg := assertionAggregator{}

                    v1api := v1.NewAPI(promClient)

                    value, warnings, err = v1api.Query(ctx, tt.query, time.Now())
                    if err != nil {
                        return retry.ExpectedError(err)
                    }

                    if len(warnings) > 0 {
                        return retry.ExpectedErrorf("prometheus query had warnings %#v", warnings)
                    }

                    assert := assert.New(&agg)

                    switch val := value.(type) {
                    case *model.Scalar:
                        tt.check(assert, float64(val.Value))
                    case model.Vector:
                        tt.check(assert, float64(val[val.Len()-1].Value))
                    default:
                        return fmt.Errorf("unexpected value type %s", val.Type())
                    }

                    if agg.hadErrors {
                        return retry.ExpectedError(errors.New(agg.String()))
                    }

                    return nil
                })

                require.NoError(t, err)
            })
        }
    }
}

type assertionAggregator struct {
    errors    map[string]struct{}
    hadErrors bool
}

func (agg *assertionAggregator) Errorf(format string, args ...any) {
    errorString := fmt.Sprintf(format, args...)

    if agg.errors == nil {
        agg.errors = map[string]struct{}{}
    }

    agg.errors[errorString] = struct{}{}
    agg.hadErrors = true
}

func (agg *assertionAggregator) String() string {
    lines := make([]string, 0, len(agg.errors))

    for errorString := range agg.errors {
        lines = append(lines, " * "+errorString)
    }

    sort.Strings(lines)

    return strings.Join(lines, "\n")
}
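The aggregator at the end of this file works because testify's assert.New accepts any value with an Errorf method, so failed assertions can be collected and surfaced as retryable errors rather than failing the test on the first metrics sample. A condensed sketch of the same pattern (the names are illustrative, and it assumes the errors, fmt, strings, time, go-retry, and testify imports shown above):

    type errSink struct{ failures []string }

    func (s *errSink) Errorf(format string, args ...any) {
        s.failures = append(s.failures, fmt.Sprintf(format, args...))
    }

    func retryAssertion(check func(*assert.Assertions)) error {
        return retry.Constant(time.Second * 15).Retry(func() error {
            sink := &errSink{}

            check(assert.New(sink)) // assertion failures land in the sink, not in *testing.T

            if len(sink.failures) > 0 {
                return retry.ExpectedError(errors.New(strings.Join(sink.failures, "; ")))
            }

            return nil
        })
    }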
File diff suppressed because it is too large
@ -1,61 +0,0 @@
// Copyright (c) 2025 Sidero Labs, Inc.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests

import (
    "errors"
    "io"
    "reflect"
    "time"
)

type corpusEntry = struct {
    Parent     string
    Path       string
    Data       []byte
    Values     []any
    Generation int
    IsSeed     bool
}

var errMain = errors.New("testing: unexpected use of func Main")

type matchStringOnly func(pat, str string) (bool, error)

func (f matchStringOnly) MatchString(pat, str string) (bool, error) { return f(pat, str) }

func (f matchStringOnly) StartCPUProfile(io.Writer) error { return errMain }

func (f matchStringOnly) StopCPUProfile() {}

func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain }

func (f matchStringOnly) ImportPath() string { return "" }

func (f matchStringOnly) StartTestLog(io.Writer) {}

func (f matchStringOnly) StopTestLog() error { return errMain }

func (f matchStringOnly) SetPanicOnExit0(bool) {}

func (f matchStringOnly) CoordinateFuzzing(time.Duration, int64, time.Duration, int64, int, []corpusEntry, []reflect.Type, string, string) error {
    return nil
}

func (f matchStringOnly) RunFuzzWorker(func(corpusEntry) error) error { return nil }

func (f matchStringOnly) ReadCorpus(string, []reflect.Type) ([]corpusEntry, error) {
    return nil, nil
}

func (f matchStringOnly) CheckCorpus([]any, []reflect.Type) error { return nil }

func (f matchStringOnly) ResetCoverage()    {}
func (f matchStringOnly) SnapshotCoverage() {}

func (f matchStringOnly) InitRuntimeCoverage() (mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64) {
    return "", func(string, string) (string, error) { return "", nil }, func() float64 { return 0 }
}
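The shim above mirrors the unexported testDeps interface consumed by testing.MainStart, which is what lets a standalone binary drive Go tests without `go test`. A rough sketch of the intended use, assuming the five-argument MainStart form of Go 1.18+ (the function is internal to the toolchain and its signature changes between releases) and the regexp and testing imports:

    // runTests is a sketch, not code from this repository.
    func runTests(tests []testing.InternalTest) int {
        m := testing.MainStart(
            matchStringOnly(regexp.MatchString), // satisfies the unexported deps interface
            tests,
            nil, // benchmarks
            nil, // fuzz targets
            nil, // examples
        )

        return m.Run()
    }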
@ -12,8 +12,8 @@ import (
    "net/http"
    "os"

    "github.com/siderolabs/omni/cmd/integration-test/pkg/clientconfig"
    "github.com/siderolabs/omni/internal/backend/workloadproxy"
    "github.com/siderolabs/omni/internal/pkg/clientconfig"
)

func main() {
@ -12,8 +12,8 @@
    "dependencies": {
        "@auth0/auth0-vue": "^2.4.0",
        "@headlessui/vue": "^1.7.23",
        "@jsonforms/vue": "^3.5.1",
        "@jsonforms/vue-vanilla": "^3.5.1",
        "@jsonforms/vue": "^3.5.1",
        "@kubernetes/client-node": "^0.22.3",
        "apexcharts": "3.45.2",
        "click-outside-vue3": "^4.0.1",
@ -164,15 +164,17 @@ if [[ "${RUN_TALEMU_TESTS:-false}" == "true" ]]; then

SSL_CERT_DIR=hack/certs:/etc/ssl/certs \
  ${ARTIFACTS}/integration-test-linux-amd64 \
    --endpoint https://my-instance.localhost:8099 \
    --talos-version=${TALOS_VERSION} \
    --omnictl-path=${ARTIFACTS}/omnictl-linux-amd64 \
    --expected-machines=30 \
    --provision-config-file=hack/test/provisionconfig.yaml \
    --output-dir="${TEST_OUTPUTS_DIR}" \
    --run-stats-check \
    -t 10m \
    -p 10 \
    --omni.endpoint https://my-instance.localhost:8099 \
    --omni.talos-version=${TALOS_VERSION} \
    --omni.omnictl-path=${ARTIFACTS}/omnictl-linux-amd64 \
    --omni.expected-machines=30 \
    --omni.provision-config-file=hack/test/provisionconfig.yaml \
    --omni.output-dir="${TEST_OUTPUTS_DIR}" \
    --omni.run-stats-check \
    --test.timeout 10m \
    --test.parallel 10 \
    --test.failfast \
    --test.v \
    ${TALEMU_TEST_ARGS:-}

docker rm -f "$PROMETHEUS_CONTAINER"
@ -272,10 +274,12 @@ sleep 5

SSL_CERT_DIR=hack/certs:/etc/ssl/certs \
  ${ARTIFACTS}/integration-test-linux-amd64 \
    --endpoint https://my-instance.localhost:8099 \
    --talos-version=${TALOS_VERSION} \
    --omnictl-path=${ARTIFACTS}/omnictl-linux-amd64 \
    --expected-machines=8 `# equal to the masters+workers above` \
    --omni.endpoint https://my-instance.localhost:8099 \
    --omni.talos-version=${TALOS_VERSION} \
    --omni.omnictl-path=${ARTIFACTS}/omnictl-linux-amd64 \
    --omni.expected-machines=8 `# equal to the masters+workers above` \
    --test.failfast \
    --test.v \
    ${INTEGRATION_TEST_ARGS:-}

if [ "${INTEGRATION_RUN_E2E_TEST:-true}" == "true" ]; then
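A plausible reading of the --endpoint to --omni.endpoint rename above: once the binary is built by `go test`, the application flags have to share a single flag set with the standard --test.* flags, so they get their own namespace. A minimal sketch of that coexistence (the flag name matches the script above; the default value and TestMain wiring are assumptions):

    //go:build integration

    package integration_test

    import (
        "flag"
        "testing"
    )

    // registered on the standard flag set, so it parses alongside
    // --test.run, --test.failfast, --test.timeout, etc.
    var omniEndpoint = flag.String("omni.endpoint", "grpc://127.0.0.1:8080", "Omni API endpoint")

    func TestMain(m *testing.M) {
        flag.Parse()

        m.Run()
    }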
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
@ -55,11 +57,11 @@ import (
    "github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
    "github.com/siderolabs/omni/client/pkg/omni/resources/system"
    "github.com/siderolabs/omni/client/pkg/omni/resources/virtual"
    "github.com/siderolabs/omni/cmd/integration-test/pkg/clientconfig"
    "github.com/siderolabs/omni/internal/backend/runtime/omni/infraprovider"
    "github.com/siderolabs/omni/internal/backend/runtime/omni/validated"
    "github.com/siderolabs/omni/internal/pkg/auth"
    "github.com/siderolabs/omni/internal/pkg/auth/role"
    "github.com/siderolabs/omni/internal/pkg/clientconfig"
    "github.com/siderolabs/omni/internal/pkg/grpcutil"
)
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
@ -3,11 +3,12 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
    "strings"
    "time"

    "github.com/cosi-project/runtime/pkg/resource"
@ -18,8 +19,8 @@ import (
    "github.com/siderolabs/omni/client/pkg/omni/resources/omni"
)

// TestBlockClusterShouldBeReady is a reusable block of assertions that can be used to verify that a cluster is fully ready.
func TestBlockClusterShouldBeReady(ctx context.Context, rootClient *client.Client, clusterName,
// AssertBlockClusterShouldBeReady is a reusable block of assertions that can be used to verify that a cluster is fully ready.
func AssertBlockClusterShouldBeReady(ctx context.Context, rootClient *client.Client, clusterName,
    expectedTalosVersion string, talosAPIKeyPrepare TalosAPIKeyPrepareFunc,
) subTestList { //nolint:nolintlint,revive
    return subTestList{
@ -63,8 +64,8 @@ func TestBlockClusterShouldBeReady(ctx context.Context, rootClient *client.Clien
        }
    }

// TestBlockProxyAPIAccessShouldWork is a reusable block of assertions that can be used to verify that Omni API proxies work.
func TestBlockProxyAPIAccessShouldWork(ctx context.Context, rootClient *client.Client, clusterName string, talosAPIKeyPrepare TalosAPIKeyPrepareFunc) []subTest { //nolint:nolintlint,revive
// AssertBlockProxyAPIAccessShouldWork is a reusable block of assertions that can be used to verify that Omni API proxies work.
func AssertBlockProxyAPIAccessShouldWork(ctx context.Context, rootClient *client.Client, clusterName string, talosAPIKeyPrepare TalosAPIKeyPrepareFunc) []subTest { //nolint:nolintlint,revive
    return []subTest{
        {
            "ClusterKubernetesAPIShouldBeAccessibleViaOmni",
@ -77,18 +78,18 @@ func TestBlockProxyAPIAccessShouldWork(ctx context.Context, rootClient *client.C
    }
}

// TestBlockClusterAndTalosAPIAndKubernetesShouldBeReady is a reusable block of assertions that can be used to verify
// AssertBlockClusterAndTalosAPIAndKubernetesShouldBeReady is a reusable block of assertions that can be used to verify
// that a cluster is fully ready and that Omni API proxies work, and Kubernetes version is correct, and Kubernetes usage
// metrics were collected.
//
// This block is a bit slower than TestBlockClusterShouldBeReady, because it also verifies Kubernetes version.
func TestBlockClusterAndTalosAPIAndKubernetesShouldBeReady(
// This block is a bit slower than AssertBlockClusterShouldBeReady, because it also verifies Kubernetes version.
func AssertBlockClusterAndTalosAPIAndKubernetesShouldBeReady(
    ctx context.Context, rootClient *client.Client,
    clusterName, expectedTalosVersion, expectedKubernetesVersion string,
    talosAPIKeyPrepare TalosAPIKeyPrepareFunc,
) []subTest { //nolint:nolintlint,revive
    return TestBlockClusterShouldBeReady(ctx, rootClient, clusterName, expectedTalosVersion, talosAPIKeyPrepare).
        Append(TestBlockProxyAPIAccessShouldWork(ctx, rootClient, clusterName, talosAPIKeyPrepare)...).
    return AssertBlockClusterShouldBeReady(ctx, rootClient, clusterName, expectedTalosVersion, talosAPIKeyPrepare).
        Append(AssertBlockProxyAPIAccessShouldWork(ctx, rootClient, clusterName, talosAPIKeyPrepare)...).
        Append(
            subTest{
                "ClusterKubernetesVersionShouldBeCorrect",
@ -107,9 +108,9 @@ func TestBlockClusterAndTalosAPIAndKubernetesShouldBeReady(
    )
}

// TestBlockRestoreEtcdFromLatestBackup is a reusable block of assertions that can be used to verify that a
// AssertBlockRestoreEtcdFromLatestBackup is a reusable block of assertions that can be used to verify that a
// cluster's control plane can be broken, destroyed and then restored from an etcd backup.
func TestBlockRestoreEtcdFromLatestBackup(ctx context.Context, rootClient *client.Client, talosAPIKeyPrepare TalosAPIKeyPrepareFunc,
func AssertBlockRestoreEtcdFromLatestBackup(ctx context.Context, rootClient *client.Client, talosAPIKeyPrepare TalosAPIKeyPrepareFunc,
    options Options, controlPlaneNodeCount int, clusterName, assertDeploymentNS, assertDeploymentName string,
) subTestList { //nolint:nolintlint,revive
    return subTestList{
@ -161,13 +162,13 @@ func TestBlockRestoreEtcdFromLatestBackup(ctx context.Context, rootClient *clien
            AssertKubernetesDeploymentHasRunningPods(ctx, rootClient.Management(), clusterName, assertDeploymentNS, assertDeploymentName),
        },
    ).Append(
        TestBlockKubernetesDeploymentCreateAndRunning(ctx, rootClient.Management(), clusterName, assertDeploymentNS, assertDeploymentName+"-after-restore")...,
        AssertBlockKubernetesDeploymentCreateAndRunning(ctx, rootClient.Management(), clusterName, assertDeploymentNS, assertDeploymentName+"-after-restore")...,
    )
}

// TestBlockCreateClusterFromEtcdBackup is a reusable block of assertions that can be used to verify that a
// AssertBlockCreateClusterFromEtcdBackup is a reusable block of assertions that can be used to verify that a
// new cluster can be created from another cluster's etcd backup.
func TestBlockCreateClusterFromEtcdBackup(ctx context.Context, rootClient *client.Client, talosAPIKeyPrepare TalosAPIKeyPrepareFunc, options Options,
func AssertBlockCreateClusterFromEtcdBackup(ctx context.Context, rootClient *client.Client, talosAPIKeyPrepare TalosAPIKeyPrepareFunc, options Options,
    sourceClusterName, newClusterName, assertDeploymentNS, assertDeploymentName string,
) subTestList { //nolint:nolintlint,revive
    return subTestList{
@ -211,13 +212,13 @@ func TestBlockCreateClusterFromEtcdBackup(ctx context.Context, rootClient *clien
            AssertKubernetesDeploymentHasRunningPods(ctx, rootClient.Management(), newClusterName, assertDeploymentNS, assertDeploymentName),
        },
    ).Append(
        TestBlockKubernetesDeploymentCreateAndRunning(ctx, rootClient.Management(), newClusterName, assertDeploymentNS, assertDeploymentName+"-after-restore")...,
        AssertBlockKubernetesDeploymentCreateAndRunning(ctx, rootClient.Management(), newClusterName, assertDeploymentNS, assertDeploymentName+"-after-restore")...,
    )
}

// TestBlockKubernetesDeploymentCreateAndRunning is a reusable block of assertions that can be used to verify that a
// AssertBlockKubernetesDeploymentCreateAndRunning is a reusable block of assertions that can be used to verify that a
// Kubernetes deployment is created and has running pods.
func TestBlockKubernetesDeploymentCreateAndRunning(ctx context.Context, managementClient *management.Client, clusterName, ns, name string) []subTest { //nolint:nolintlint,revive
func AssertBlockKubernetesDeploymentCreateAndRunning(ctx context.Context, managementClient *management.Client, clusterName, ns, name string) []subTest { //nolint:nolintlint,revive
    return []subTest{
        {
            "KubernetesDeploymentShouldBeCreated",
@ -230,41 +231,34 @@ func TestBlockKubernetesDeploymentCreateAndRunning(ctx context.Context, manageme
    }
}

// TestGroupClusterCreateAndReady is a reusable group of tests that can be used to verify that a cluster is created and ready.
func TestGroupClusterCreateAndReady(
// AssertClusterCreateAndReady is a reusable group of tests that can be used to verify that a cluster is created and ready.
func AssertClusterCreateAndReady(
    ctx context.Context,
    rootClient *client.Client,
    talosAPIKeyPrepare TalosAPIKeyPrepareFunc,
    name, description string,
    name string,
    options ClusterOptions,
    testOutputDir string,
) testGroup { //nolint:nolintlint,revive
) []subTest { //nolint:nolintlint,revive
    clusterName := "integration-" + name
    options.Name = clusterName

    return testGroup{
        Name:         strings.ToUpper(name[0:1]) + name[1:] + "Cluster",
        Description:  description,
        Parallel:     true,
        MachineClaim: options.ControlPlanes + options.Workers,
        Subtests: subTests(
            subTest{
                "ClusterShouldBeCreated",
                CreateCluster(ctx, rootClient, options),
            },
        ).Append(
            TestBlockClusterAndTalosAPIAndKubernetesShouldBeReady(ctx, rootClient, clusterName, options.MachineOptions.TalosVersion, options.MachineOptions.KubernetesVersion, talosAPIKeyPrepare)...,
        ).Append(
            subTest{
                "AssertSupportBundleContents",
                AssertSupportBundleContents(ctx, rootClient, clusterName),
            },
        ).Append(
            subTest{
                "ClusterShouldBeDestroyed",
                AssertDestroyCluster(ctx, rootClient.Omni().State(), clusterName, options.InfraProvider != "", false),
            },
        ),
        Finalizer: DestroyCluster(ctx, rootClient, testOutputDir, clusterName),
    }
    return subTests(
        subTest{
            "ClusterShouldBeCreated",
            CreateCluster(ctx, rootClient, options),
        },
    ).Append(
        AssertBlockClusterAndTalosAPIAndKubernetesShouldBeReady(ctx, rootClient, clusterName, options.MachineOptions.TalosVersion, options.MachineOptions.KubernetesVersion, talosAPIKeyPrepare)...,
    ).Append(
        subTest{
            "AssertSupportBundleContents",
            AssertSupportBundleContents(ctx, rootClient, clusterName),
        },
    ).Append(
        subTest{
            "ClusterShouldBeDestroyed",
            AssertDestroyCluster(ctx, rootClient.Omni().State(), clusterName, options.InfraProvider != "", false),
        },
    )
}
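subTestList, introduced later in this diff, is a plain slice of named subtests, so a driver needs nothing more than t.Run to execute a block built by the helpers above. A hypothetical driver, not the repository's actual runner:

    func runSubTests(t *testing.T, list subTestList) {
        for _, st := range list {
            if !t.Run(st.Name, st.F) {
                return // t.Run reports success, so this stops at the first failure, like --test.failfast
            }
        }
    }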
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "bytes"
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
@ -63,12 +65,6 @@ type ClusterOptions struct {
    SkipExtensionCheckOnCreate bool
}

// MachineOptions are the options for machine creation.
type MachineOptions struct {
    TalosVersion      string
    KubernetesVersion string
}

// CreateCluster verifies cluster creation.
func CreateCluster(testCtx context.Context, cli *client.Client, options ClusterOptions) TestFunc {
    return func(t *testing.T) {
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
@ -11,19 +13,24 @@ import (
    "errors"
    "fmt"
    "net"
    "net/http"
    "strings"
    "testing"
    "time"

    "github.com/cosi-project/runtime/pkg/resource"
    "github.com/cosi-project/runtime/pkg/safe"
    "github.com/cosi-project/runtime/pkg/state"
    talosclient "github.com/siderolabs/talos/pkg/machinery/client"
    clientconfig "github.com/siderolabs/talos/pkg/machinery/client/config"
    talosclientconfig "github.com/siderolabs/talos/pkg/machinery/client/config"
    "github.com/siderolabs/talos/pkg/machinery/resources/cluster"
    "golang.org/x/sync/semaphore"

    "github.com/siderolabs/omni/client/pkg/client"
    "github.com/siderolabs/omni/client/pkg/omni/resources"
    "github.com/siderolabs/omni/client/pkg/omni/resources/omni"
    "github.com/siderolabs/omni/internal/backend/runtime/talos"
    "github.com/siderolabs/omni/internal/pkg/clientconfig"
)

func resourceDetails(res resource.Resource) string {
@ -137,7 +144,7 @@ func talosClient(ctx context.Context, cli *client.Client, clusterName string) (*
    return nil, errors.New("empty talosconfig")
}

config, err := clientconfig.FromBytes(data)
config, err := talosclientconfig.FromBytes(data)
if err != nil {
    return nil, err
}
@ -177,3 +184,127 @@ func talosNodeIPs(ctx context.Context, talosState state.State) ([]string, error)

    return nodeIPs, nil
}

//nolint:govet
type testGroup struct {
    Name         string
    Description  string
    Parallel     bool
    MachineClaim int
    Subtests     []subTest
    Finalizer    func(t *testing.T)
}

//nolint:govet
type subTest struct {
    Name string
    F    func(t *testing.T)
}

type subTestList []subTest

func subTests(items ...subTest) subTestList {
    return items
}

func (l subTestList) Append(items ...subTest) subTestList {
    return append(l, items...)
}

// MachineOptions are the options for machine creation.
type MachineOptions struct {
    TalosVersion      string
    KubernetesVersion string
}

// TestFunc is a testing function prototype.
type TestFunc func(t *testing.T)

// RestartAMachineFunc is a function to restart a machine by UUID.
type RestartAMachineFunc func(ctx context.Context, uuid string) error

// WipeAMachineFunc is a function to wipe a machine by UUID.
type WipeAMachineFunc func(ctx context.Context, uuid string) error

// FreezeAMachineFunc is a function to freeze a machine by UUID.
type FreezeAMachineFunc func(ctx context.Context, uuid string) error

// HTTPRequestSignerFunc is a function to sign the HTTP request.
type HTTPRequestSignerFunc func(ctx context.Context, req *http.Request) error

// TalosAPIKeyPrepareFunc is a function to prepare a public key for Talos API auth.
type TalosAPIKeyPrepareFunc func(ctx context.Context, contextName string) error

// Options for the test runner.
//
//nolint:govet
type Options struct {
    CleanupLinks                bool
    SkipExtensionsCheckOnCreate bool
    RunStatsCheck               bool
    ExpectedMachines            int

    RestartAMachineFunc RestartAMachineFunc
    WipeAMachineFunc    WipeAMachineFunc
    FreezeAMachineFunc  FreezeAMachineFunc
    ProvisionConfigs    []MachineProvisionConfig

    MachineOptions MachineOptions

    HTTPEndpoint             string
    AnotherTalosVersion      string
    AnotherKubernetesVersion string
    OmnictlPath              string
    ScalingTimeout           time.Duration
    StaticInfraProvider      string
    OutputDir                string
}

func (o Options) defaultInfraProvider() string {
    if len(o.ProvisionConfigs) == 0 {
        return ""
    }

    return o.ProvisionConfigs[0].Provider.ID
}

func (o Options) defaultProviderData() string {
    if len(o.ProvisionConfigs) == 0 {
        return "{}"
    }

    return o.ProvisionConfigs[0].Provider.Data
}

func (o Options) provisionMachines() bool {
    var totalMachineCount int

    for _, cfg := range o.ProvisionConfigs {
        totalMachineCount += cfg.MachineCount
    }

    return totalMachineCount > 0
}

// MachineProvisionConfig tells the test to provision machines from the infra provider.
type MachineProvisionConfig struct {
    Provider     MachineProviderConfig `yaml:"provider"`
    MachineCount int                   `yaml:"count"`
}

// MachineProviderConfig keeps the configuration of the infra provider for the machine provision config.
type MachineProviderConfig struct {
    ID     string `yaml:"id"`
    Data   string `yaml:"data"`
    Static bool   `yaml:"static"`
}

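// An illustrative aside, not part of this file: with the yaml tags above, a
// --omni.provision-config-file document that decodes into []MachineProvisionConfig
// could look like the literal below. The provider ID and data are placeholder
// values, and decoding via gopkg.in/yaml.v3 is an assumption about the runner.
//
//	const exampleProvisionConfig = `
//	- provider:
//	    id: talemu
//	    data: "{}"
//	    static: false
//	  count: 10
//	`
//
//	var cfgs []MachineProvisionConfig
//
//	err := yaml.Unmarshal([]byte(exampleProvisionConfig), &cfgs)
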
// TestOptions contains all common data that might be required to run the tests.
type TestOptions struct {
    Options

    omniClient         *client.Client
    talosAPIKeyPrepare TalosAPIKeyPrepareFunc
    clientConfig       *clientconfig.ClientConfig

    machineSemaphore *semaphore.Weighted
}
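
// An illustrative aside, not part of this file: the machineSemaphore above can
// gate parallel test groups by the number of machines they claim, using
// golang.org/x/sync/semaphore as imported in this file. The helper name and
// signature are assumptions.
//
//	func withMachineClaim(ctx context.Context, sem *semaphore.Weighted, claim int64, f func() error) error {
//		if err := sem.Acquire(ctx, claim); err != nil {
//			return err
//		}
//		defer sem.Release(claim)
//
//		return f()
//	}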
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
@ -3,7 +3,9 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

package tests
//go:build integration

package integration_test

import (
    "context"
internal/integration/infra_test.go (new file, 359 lines)
@ -0,0 +1,359 @@
|
||||
// Copyright (c) 2025 Sidero Labs, Inc.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the LICENSE file.
|
||||
|
||||
//go:build integration
|
||||
|
||||
package integration_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/resource/rtestutils"
|
||||
"github.com/cosi-project/runtime/pkg/safe"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-retry/retry"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zaptest"
|
||||
|
||||
"github.com/siderolabs/omni/client/api/omni/specs"
|
||||
"github.com/siderolabs/omni/client/pkg/client"
|
||||
"github.com/siderolabs/omni/client/pkg/omni/resources"
|
||||
"github.com/siderolabs/omni/client/pkg/omni/resources/infra"
|
||||
"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
|
||||
"github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
|
||||
)
|
||||
|
||||
// machineProvisionHook creates a machine request set and waits until all requests are fulfilled.
|
||||
//
|
||||
//nolint:gocognit
|
||||
func machineProvisionHook(t *testing.T, client *client.Client, cfg MachineProvisionConfig, machineRequestSetName,
|
||||
talosVersion string,
|
||||
) {
|
||||
ctx, cancel := context.WithTimeout(t.Context(), time.Minute*5)
|
||||
defer cancel()
|
||||
|
||||
rtestutils.AssertResources(ctx, t, client.Omni().State(), []string{cfg.Provider.ID}, func(*infra.ProviderStatus, *assert.Assertions) {})
|
||||
|
||||
machineRequestSet, err := safe.ReaderGetByID[*omni.MachineRequestSet](ctx, client.Omni().State(), machineRequestSetName)
|
||||
|
||||
if !state.IsNotFoundError(err) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if machineRequestSet != nil {
|
||||
rtestutils.Destroy[*omni.MachineRequestSet](ctx, t, client.Omni().State(), []string{machineRequestSetName})
|
||||
}
|
||||
|
||||
machineRequestSet = omni.NewMachineRequestSet(resources.DefaultNamespace, machineRequestSetName)
|
||||
|
||||
machineRequestSet.TypedSpec().Value.Extensions = []string{
|
||||
"siderolabs/" + HelloWorldServiceExtensionName,
|
||||
}
|
||||
|
||||
machineRequestSet.TypedSpec().Value.ProviderId = cfg.Provider.ID
|
||||
machineRequestSet.TypedSpec().Value.TalosVersion = talosVersion
|
||||
machineRequestSet.TypedSpec().Value.ProviderData = cfg.Provider.Data
|
||||
machineRequestSet.TypedSpec().Value.MachineCount = int32(cfg.MachineCount)
|
||||
|
||||
require.NoError(t, client.Omni().State().Create(ctx, machineRequestSet))
|
||||
|
||||
var resources safe.List[*infra.MachineRequestStatus]
|
||||
|
||||
err = retry.Constant(time.Second*60).RetryWithContext(ctx, func(ctx context.Context) error {
|
||||
resources, err = safe.ReaderListAll[*infra.MachineRequestStatus](ctx, client.Omni().State(),
|
||||
state.WithLabelQuery(resource.LabelEqual(omni.LabelMachineRequestSet, machineRequestSetName)),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resources.Len() != cfg.MachineCount {
|
||||
return retry.ExpectedErrorf("provision machine count is %d, expected %d", resources.Len(), cfg.MachineCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
err = retry.Constant(time.Minute*5).RetryWithContext(ctx, func(ctx context.Context) error {
|
||||
var machines safe.List[*omni.MachineStatus]
|
||||
|
||||
machines, err = safe.ReaderListAll[*omni.MachineStatus](ctx, client.Omni().State())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if machines.Len() < cfg.MachineCount {
|
||||
return retry.ExpectedErrorf("links count is %d, expected at least %d", machines.Len(), cfg.MachineCount)
|
||||
}
|
||||
|
||||
for r := range resources.All() {
|
||||
requestedMachines := machines.FilterLabelQuery(resource.LabelEqual(omni.LabelMachineRequest, r.Metadata().ID()))
|
||||
|
||||
if requestedMachines.Len() == 0 {
|
||||
return retry.ExpectedErrorf("machine request %q doesn't have the related link", r.Metadata().ID())
|
||||
}
|
||||
|
||||
if requestedMachines.Len() != 1 {
|
||||
return fmt.Errorf("more than one machine is labeled with %q machine request label", r.Metadata().ID())
|
||||
}
|
||||
|
||||
m := requestedMachines.Get(0)
|
||||
if m.TypedSpec().Value.Hardware == nil {
|
||||
return retry.ExpectedErrorf("the machine %q is not fully provisioned", r.Metadata().ID())
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// machineDeprovisionHook removes the machine request set and checks that all related links were deleted.
|
||||
func machineDeprovisionHook(t *testing.T, client *client.Client, machineRequestSetName string) {
|
||||
ctx, cancel := context.WithTimeout(t.Context(), time.Minute*5)
|
||||
defer cancel()
|
||||
|
||||
requestIDs := rtestutils.ResourceIDs[*infra.MachineRequest](ctx, t, client.Omni().State(),
|
||||
state.WithLabelQuery(resource.LabelEqual(omni.LabelMachineRequestSet, machineRequestSetName)),
|
||||
)
|
||||
|
||||
links, err := safe.ReaderListAll[*siderolink.Link](ctx, client.Omni().State())
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
linkIDs := make([]string, 0, len(requestIDs))
|
||||
|
||||
for l := range links.All() {
|
||||
mr, ok := l.Metadata().Labels().Get(omni.LabelMachineRequest)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if slices.Index(requestIDs, mr) != -1 {
|
||||
linkIDs = append(linkIDs, l.Metadata().ID())
|
||||
}
|
||||
}
|
||||
|
||||
rtestutils.Destroy[*omni.MachineRequestSet](ctx, t, client.Omni().State(), []string{machineRequestSetName})
|
||||
|
||||
for _, id := range requestIDs {
|
||||
rtestutils.AssertNoResource[*infra.MachineRequest](ctx, t, client.Omni().State(), id)
|
||||
}
|
||||
|
||||
for _, id := range linkIDs {
|
||||
rtestutils.AssertNoResource[*siderolink.Link](ctx, t, client.Omni().State(), id)
|
||||
}
|
||||
}
|
||||
|
||||
// infraMachinesAcceptHook asserts that there are a certain number of machines that are not accepted, provisioned by the static infra provider with the given ID.
|
||||
//
|
||||
// It then accepts them all and asserts that the states of various resources are updated as expected.
|
||||
func infraMachinesAcceptHook(t *testing.T, omniState state.State, infraProviderID string, expectedCount int, disableKexec bool) {
|
||||
const disableKexecConfigPatch = `machine:
|
||||
install:
|
||||
extraKernelArgs:
|
||||
- kexec_load_disabled=1
|
||||
sysctls:
|
||||
kernel.kexec_load_disabled: "1"`
|
||||
|
||||
logger := zaptest.NewLogger(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(t.Context(), time.Minute*10)
|
||||
defer cancel()
|
||||
|
||||
linksMap := make(map[string]*siderolink.Link, expectedCount)
|
||||
|
||||
err := retry.Constant(time.Minute*10).RetryWithContext(ctx, func(ctx context.Context) error {
|
||||
links, err := safe.ReaderListAll[*siderolink.Link](ctx, omniState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
discoveredLinks := 0
|
||||
|
||||
for link := range links.All() {
|
||||
providerID, ok := link.Metadata().Annotations().Get(omni.LabelInfraProviderID)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if infraProviderID == providerID {
|
||||
discoveredLinks++
|
||||
}
|
||||
|
||||
linksMap[link.Metadata().ID()] = link
|
||||
}
|
||||
|
||||
if discoveredLinks != expectedCount {
|
||||
return retry.ExpectedErrorf("expected %d static infra provider machines, got %d", expectedCount, discoveredLinks)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// link count should match the expected count
|
||||
require.Equal(t, expectedCount, len(linksMap))
|
||||
|
||||
ids := make([]resource.ID, 0, len(linksMap))
|
||||
|
||||
for id := range linksMap {
|
||||
ids = append(ids, id)
|
||||
|
||||
rtestutils.AssertResource(ctx, t, omniState, id, func(res *infra.Machine, assertion *assert.Assertions) {
|
||||
assertion.Equal(specs.InfraMachineConfigSpec_PENDING, res.TypedSpec().Value.AcceptanceStatus)
|
||||
})
|
||||
|
||||
rtestutils.AssertNoResource[*infra.MachineStatus](ctx, t, omniState, id)
|
||||
|
||||
rtestutils.AssertNoResource[*omni.Machine](ctx, t, omniState, id)
|
||||
|
||||
// Accept the machine
|
||||
infraMachineConfig := omni.NewInfraMachineConfig(resources.DefaultNamespace, id)
|
||||
|
||||
infraMachineConfig.TypedSpec().Value.AcceptanceStatus = specs.InfraMachineConfigSpec_ACCEPTED
|
||||
|
||||
if disableKexec {
|
||||
infraMachineConfig.TypedSpec().Value.ExtraKernelArgs = "kexec_load_disabled=1"
|
||||
}
|
||||
|
||||
require.NoError(t, omniState.Create(ctx, infraMachineConfig))
|
||||
|
||||
if disableKexec {
|
||||
disableKexecConfigPatchRes := omni.NewConfigPatch(resources.DefaultNamespace, fmt.Sprintf("500-%s-disable-kexec", id))
|
||||
|
||||
disableKexecConfigPatchRes.Metadata().Labels().Set(omni.LabelMachine, id)
|
||||
|
||||
require.NoError(t, disableKexecConfigPatchRes.TypedSpec().Value.SetUncompressedData([]byte(disableKexecConfigPatch)))
|
||||
require.NoError(t, omniState.Create(ctx, disableKexecConfigPatchRes))
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("accepted machines", zap.Reflect("infra_provider_id", infraProviderID), zap.Strings("machine_ids", ids))
|
||||
|
||||
// Assert that the infra.Machines are now marked as accepted
|
||||
rtestutils.AssertResources(ctx, t, omniState, ids, func(res *infra.Machine, assertion *assert.Assertions) {
|
||||
assertion.Equal(specs.InfraMachineConfigSpec_ACCEPTED, res.TypedSpec().Value.AcceptanceStatus)
|
||||
})
|
||||
|
||||
// Assert that omni.Machine resources are now created and marked as managed by the static infra provider
|
||||
rtestutils.AssertResources(ctx, t, omniState, ids, func(res *omni.Machine, assertion *assert.Assertions) {
|
||||
_, isManagedByStaticInfraProvider := res.Metadata().Labels().Get(omni.LabelIsManagedByStaticInfraProvider)
|
||||
|
||||
assertion.True(isManagedByStaticInfraProvider)
|
||||
})
|
||||
|
||||
// Assert that omni.Machine resources are now created
|
||||
rtestutils.AssertResources(ctx, t, omniState, ids, func(res *omni.Machine, assertion *assert.Assertions) {
|
||||
_, isManagedByStaticInfraProvider := res.Metadata().Labels().Get(omni.LabelIsManagedByStaticInfraProvider)
|
||||
|
||||
assertion.True(isManagedByStaticInfraProvider)
|
||||
})
|
||||
|
||||
	// Assert that infra.MachineStatus resources are now created, powered off, marked as ready to use, and the machine labels are set on them
	rtestutils.AssertResources(ctx, t, omniState, ids, func(res *infra.MachineStatus, assertion *assert.Assertions) {
		aVal, _ := res.Metadata().Labels().Get("a")
		assertion.Equal("b", aVal)

		_, cOk := res.Metadata().Labels().Get("c")
		assertion.True(cOk)

		assertion.Equal(specs.InfraMachineStatusSpec_POWER_STATE_OFF, res.TypedSpec().Value.PowerState)
		assertion.True(res.TypedSpec().Value.ReadyToUse)
	})

	// Assert the infra provider labels on MachineStatus resources
	rtestutils.AssertResources(ctx, t, omniState, ids, func(res *omni.MachineStatus, assertion *assert.Assertions) {
		link := linksMap[res.Metadata().ID()]

		infraProviderID, _ := link.Metadata().Annotations().Get(omni.LabelInfraProviderID)

		aLabel := fmt.Sprintf(omni.InfraProviderLabelPrefixFormat, infraProviderID) + "a"
		aVal, _ := res.Metadata().Labels().Get(aLabel)

		assertion.Equal("b", aVal)

		cLabel := fmt.Sprintf(omni.InfraProviderLabelPrefixFormat, infraProviderID) + "c"
		_, cOk := res.Metadata().Labels().Get(cLabel)
		assertion.True(cOk)
	})
}
// infraMachinesDestroyHook removes siderolink.Link resources for all machines managed by a static infra provider,
// and asserts that the related infra.Machine and infra.MachineStatus resources are deleted.
func infraMachinesDestroyHook(t *testing.T, omniState state.State, providerID string, count int) {
	ctx, cancel := context.WithTimeout(t.Context(), time.Minute*10)
	defer cancel()

	links, err := safe.StateListAll[*siderolink.Link](ctx, omniState)
	require.NoError(t, err)

	var deleted int

	for link := range links.All() {
		pid, ok := link.Metadata().Annotations().Get(omni.LabelInfraProviderID)
		if !ok {
			continue
		}

		if pid != providerID {
			continue
		}

		id := link.Metadata().ID()

		rtestutils.Destroy[*siderolink.Link](ctx, t, omniState, []string{id})

		rtestutils.AssertNoResource[*infra.Machine](ctx, t, omniState, id)
		rtestutils.AssertNoResource[*infra.MachineStatus](ctx, t, omniState, id)

		deleted++
	}

	require.EqualValues(t, count, deleted)
}
// AssertInfraMachinesAreAllocated asserts that the machines that belong to the given cluster and are managed by a static infra provider
// are marked as allocated in the related resources.
func AssertInfraMachinesAreAllocated(testCtx context.Context, omniState state.State, clusterID, talosVersion string, extensions []string) TestFunc {
	return func(t *testing.T) {
		ctx, cancel := context.WithTimeout(testCtx, time.Minute*10)
		defer cancel()

		nodeList, err := safe.StateListAll[*omni.MachineSetNode](ctx, omniState, state.WithLabelQuery(resource.LabelEqual(omni.LabelCluster, clusterID)))
		require.NoError(t, err)

		require.Greater(t, nodeList.Len(), 0)

		for machineSetNode := range nodeList.All() {
			id := machineSetNode.Metadata().ID()

			// There must be an infra.Machine resource for each node
			rtestutils.AssertResource[*infra.Machine](ctx, t, omniState, id, func(res *infra.Machine, assertion *assert.Assertions) {
				assertion.Equal(talosVersion, res.TypedSpec().Value.ClusterTalosVersion)
				assertion.Empty(res.TypedSpec().Value.WipeId)
				assertion.Equal(extensions, res.TypedSpec().Value.Extensions)
			})

			// The machine is allocated, so it will be powered on and ready to use
			rtestutils.AssertResource[*infra.MachineStatus](ctx, t, omniState, id, func(res *infra.MachineStatus, assertion *assert.Assertions) {
				assertion.Equal(specs.InfraMachineStatusSpec_POWER_STATE_ON, res.TypedSpec().Value.PowerState)
				assertion.True(res.TypedSpec().Value.ReadyToUse)
				assertion.True(res.TypedSpec().Value.Installed)
			})
		}
	}
}
348 internal/integration/integration_test.go (new file)
@@ -0,0 +1,348 @@
// Copyright (c) 2025 Sidero Labs, Inc.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

//go:build integration

package integration_test

import (
	"context"
	"errors"
	"flag"
	"fmt"
	"io"
	"net/url"
	"os"
	"os/exec"
	"testing"
	"time"

	"github.com/cosi-project/runtime/pkg/safe"
	"github.com/cosi-project/runtime/pkg/state"
	"github.com/mattn/go-shellwords"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/semaphore"
	"gopkg.in/yaml.v3"

	clientconsts "github.com/siderolabs/omni/client/pkg/constants"
	"github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
	_ "github.com/siderolabs/omni/cmd/acompat" // this package should always be imported first for init->set env to work
	"github.com/siderolabs/omni/internal/pkg/clientconfig"
	"github.com/siderolabs/omni/internal/pkg/constants"
)

// Flag values.
var (
	omniEndpoint             string
	restartAMachineScript    string
	wipeAMachineScript       string
	freezeAMachineScript     string
	omnictlPath              string
	talosVersion             string
	anotherTalosVersion      string
	kubernetesVersion        string
	anotherKubernetesVersion string
	expectedMachines         int

	// provisioning flags
	provisionMachinesCount int
	infraProvider          string
	providerData           string
	provisionConfigFile    string

	scalingTimeout time.Duration

	cleanupLinks                bool
	runStatsCheck               bool
	skipExtensionsCheckOnCreate bool
	artifactsOutputDir          string
)

func TestIntegration(t *testing.T) {
	machineOptions := MachineOptions{
		TalosVersion:      talosVersion,
		KubernetesVersion: kubernetesVersion,
	}

	options := Options{
		ExpectedMachines:            expectedMachines,
		CleanupLinks:                cleanupLinks,
		RunStatsCheck:               runStatsCheck,
		SkipExtensionsCheckOnCreate: skipExtensionsCheckOnCreate,

		MachineOptions:           machineOptions,
		AnotherTalosVersion:      anotherTalosVersion,
		AnotherKubernetesVersion: anotherKubernetesVersion,
		OmnictlPath:              omnictlPath,
		ScalingTimeout:           scalingTimeout,
		OutputDir:                artifactsOutputDir,
	}
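	// The provision config file is a stream of YAML documents, one
	// MachineProvisionConfig per document. A minimal sketch, assuming
	// illustrative field names (the struct's yaml tags are authoritative):
	//
	//	machineCount: 2
	//	provider:
	//	  id: talemu
	//	  data: "{}"
	//	---
	//	machineCount: 1
	//	provider:
	//	  id: some-static-provider
	//	  static: true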
	if provisionConfigFile != "" {
		f, err := os.Open(provisionConfigFile)

		require.NoError(t, err, "failed to open provision config file")

		decoder := yaml.NewDecoder(f)

		for {
			var cfg MachineProvisionConfig

			if err = decoder.Decode(&cfg); err != nil {
				if errors.Is(err, io.EOF) {
					break
				}

				require.NoError(t, err, "failed to parse provision config file")
			}

			options.ProvisionConfigs = append(options.ProvisionConfigs, cfg)
		}
	} else {
		options.ProvisionConfigs = append(options.ProvisionConfigs,
			MachineProvisionConfig{
				MachineCount: provisionMachinesCount,
				Provider: MachineProviderConfig{
					ID:   infraProvider,
					Data: providerData,
				},
			},
		)
	}
	if restartAMachineScript != "" {
		parsedScript, err := shellwords.Parse(restartAMachineScript)
		require.NoError(t, err, "failed to parse restart-a-machine-script file")

		options.RestartAMachineFunc = func(ctx context.Context, uuid string) error {
			return execCmd(ctx, parsedScript, uuid)
		}
	}

	if wipeAMachineScript != "" {
		parsedScript, err := shellwords.Parse(wipeAMachineScript)
		require.NoError(t, err, "failed to parse wipe-a-machine-script file")

		options.WipeAMachineFunc = func(ctx context.Context, uuid string) error {
			return execCmd(ctx, parsedScript, uuid)
		}
	}

	if freezeAMachineScript != "" {
		parsedScript, err := shellwords.Parse(freezeAMachineScript)
		require.NoError(t, err, "failed to parse freeze-a-machine-script file")

		options.FreezeAMachineFunc = func(ctx context.Context, uuid string) error {
			return execCmd(ctx, parsedScript, uuid)
		}
	}

	u, err := url.Parse(omniEndpoint)
	require.NoError(t, err, "error parsing omni endpoint")

	if u.Scheme == "grpc" {
		u.Scheme = "http"
	}
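	// Derive the HTTP endpoint of the Omni API from the gRPC endpoint by swapping the scheme.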
	options.HTTPEndpoint = u.String()

	clientConfig := clientconfig.New(omniEndpoint)

	t.Cleanup(func() {
		clientConfig.Close() //nolint:errcheck
	})

	rootClient, err := clientConfig.GetClient(t.Context())
	require.NoError(t, err)

	t.Cleanup(func() {
		require.NoError(t, rootClient.Close())
	})

	talosAPIKeyPrepare := func(ctx context.Context, contextName string) error {
		return clientconfig.TalosAPIKeyPrepare(ctx, rootClient, contextName)
	}

	if !clientconsts.IsDebugBuild {
		// noop for non-debug builds
		talosAPIKeyPrepare = func(context.Context, string) error {
			return nil
		}
	}

	testOptions := &TestOptions{
		omniClient:         rootClient,
		talosAPIKeyPrepare: talosAPIKeyPrepare,
		Options:            options,
		machineSemaphore:   semaphore.NewWeighted(int64(options.ExpectedMachines)),
		clientConfig:       clientConfig,
	}

	preRunHooks(t, testOptions)

	t.Run("Suites", func(t *testing.T) {
		t.Run("CleanState", testCleanState(testOptions))
		t.Run("TalosImageGeneration", testImageGeneration(testOptions))
		t.Run("CLICommands", testCLICommands(testOptions))
		t.Run("KubernetesNodeAudit", testKubernetesNodeAudit(testOptions))
		t.Run("ForcedMachineRemoval", testForcedMachineRemoval(testOptions))
		t.Run("ImmediateClusterDestruction", testImmediateClusterDestruction(testOptions))
		t.Run("DefaultCluster", testDefaultCluster(testOptions))
		t.Run("EncryptedCluster", testEncryptedCluster(testOptions))
		t.Run("SinglenodeCluster", testSinglenodeCluster(testOptions))
		t.Run("ScaleUpAndDown", testScaleUpAndDown(testOptions))
		t.Run("ScaleUpAndDownMachineClassBasedMachineSets", testScaleUpAndDownMachineClassBasedMachineSets(testOptions))
		t.Run("ScaleUpAndDownAutoProvisionMachineSets", testScaleUpAndDownAutoProvisionMachineSets(testOptions))
		t.Run("RollingUpdateParallelism", testRollingUpdateParallelism(testOptions))
		t.Run("ReplaceControlPlanes", testReplaceControlPlanes(testOptions))
		t.Run("ConfigPatching", testConfigPatching(testOptions))
		t.Run("TalosUpgrades", testTalosUpgrades(testOptions))
		t.Run("KubernetesUpgrades", testKubernetesUpgrades(testOptions))
		t.Run("EtcdBackupAndRestore", testEtcdBackupAndRestore(testOptions))
		t.Run("MaintenanceUpgrade", testMaintenanceUpgrade(testOptions))
		t.Run("Auth", testAuth(testOptions))
		t.Run("ClusterTemplate", testClusterTemplate(testOptions))
		t.Run("WorkloadProxy", testWorkloadProxy(testOptions))
		t.Run("StaticInfraProvider", testStaticInfraProvider(testOptions))
	})

	postRunHooks(t, testOptions)
}
func init() {
	flag.StringVar(&omniEndpoint, "omni.endpoint", "grpc://127.0.0.1:8080", "The endpoint of the Omni API.")
	flag.IntVar(&expectedMachines, "omni.expected-machines", 4, "minimum number of machines expected")
	flag.StringVar(&restartAMachineScript, "omni.restart-a-machine-script", "hack/test/restart-a-vm.sh", "a script to run to restart a machine by UUID (optional)")
	flag.StringVar(&wipeAMachineScript, "omni.wipe-a-machine-script", "hack/test/wipe-a-vm.sh", "a script to run to wipe a machine by UUID (optional)")
	flag.StringVar(&freezeAMachineScript, "omni.freeze-a-machine-script", "hack/test/freeze-a-vm.sh", "a script to run to freeze a machine by UUID (optional)")
	flag.StringVar(&omnictlPath, "omni.omnictl-path", "_out/omnictl-linux-amd64", "omnictl CLI script path (optional)")
	flag.StringVar(&anotherTalosVersion, "omni.another-talos-version",
		constants.AnotherTalosVersion,
		"Talos version for upgrade test",
	)
	flag.StringVar(
		&talosVersion,
		"omni.talos-version",
		clientconsts.DefaultTalosVersion,
		"installer version for workload clusters",
	)
	flag.StringVar(&kubernetesVersion, "omni.kubernetes-version", constants.DefaultKubernetesVersion, "Kubernetes version for workload clusters")
	flag.StringVar(&anotherKubernetesVersion, "omni.another-kubernetes-version", constants.AnotherKubernetesVersion, "Kubernetes version for upgrade tests")
	flag.BoolVar(&cleanupLinks, "omni.cleanup-links", false, "remove all links after the tests are complete")
	flag.BoolVar(&runStatsCheck, "omni.run-stats-check", false, "runs stats check after the test is complete")
	flag.IntVar(&provisionMachinesCount, "omni.provision-machines", 0, "provisions machines through the infrastructure provider")
	flag.StringVar(&infraProvider, "omni.infra-provider", "talemu", "use infra provider with the specified ID when provisioning the machines")
	flag.StringVar(&providerData, "omni.provider-data", "{}", "the infra provider machine template data to use")
	flag.DurationVar(&scalingTimeout, "omni.scale-timeout", time.Second*150, "scale up test timeout")
	flag.StringVar(&provisionConfigFile, "omni.provision-config-file", "", "provision machines with a more complicated configuration")
	flag.BoolVar(&skipExtensionsCheckOnCreate, "omni.skip-extensions-check-on-create", false,
		"disables checking for hello-world-service extension on the machine allocation and in the upgrade tests")
	flag.StringVar(&artifactsOutputDir, "omni.output-dir", "/tmp/integration-test", "output directory for the files generated by the test, e.g., the support bundles")
}
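// An illustrative invocation (values are examples only; the flag definitions
// above are authoritative, and the build requires the "integration" tag):
//
//	go test -tags integration -run TestIntegration ./internal/integration \
//		-omni.endpoint=grpc://127.0.0.1:8080 \
//		-omni.expected-machines=4 \
//		-failfast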
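// execCmd runs the parsed script with the given args appended, streaming the
// command's output to the test process's stdout and stderr.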
func execCmd(ctx context.Context, parsedScript []string, args ...string) error {
	cmd := exec.CommandContext(ctx, parsedScript[0], append(parsedScript[1:], args...)...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	return cmd.Run()
}
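// claimMachines reserves count machines from the shared pool guarded by the
// machine semaphore, blocking until enough machines are free and releasing
// them when the test completes.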
func (opts *TestOptions) claimMachines(t *testing.T, count int) {
	require.GreaterOrEqual(t, expectedMachines, count)

	t.Logf("attempting to acquire semaphore for %d machines", count)

	if err := opts.machineSemaphore.Acquire(t.Context(), int64(count)); err != nil {
		t.Fatalf("failed to acquire machine semaphore: %s", err)
	}

	t.Logf("acquired semaphore for %d machines", count)

	t.Cleanup(func() {
		t.Logf("releasing semaphore for %d machines", count)

		opts.machineSemaphore.Release(int64(count))
	})
}
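// runTests runs the given subtests sequentially under their names.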
func runTests(t *testing.T, tests []subTest) {
	for _, tt := range tests {
		t.Run(tt.Name, tt.F)
	}
}
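// preRunHooks provisions machines before the suites run: machines of static
// infra providers are accepted, while the rest are provisioned through
// machine request sets named "provisioned<N>".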
func preRunHooks(t *testing.T, options *TestOptions) {
	if !options.provisionMachines() {
		return
	}

	for i, cfg := range options.ProvisionConfigs {
		if cfg.Provider.Static {
			infraMachinesAcceptHook(t, options.omniClient.Omni().State(), cfg.Provider.ID, cfg.MachineCount, true)

			continue
		}

		t.Logf("provision %d machines using provider %q, machine request set name provisioned%d",
			cfg.MachineCount,
			cfg.Provider.ID,
			i,
		)

		machineProvisionHook(
			t,
			options.omniClient,
			cfg,
			fmt.Sprintf("provisioned%d", i),
			options.MachineOptions.TalosVersion,
		)
	}
}
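// postRunHooks reverses preRunHooks by destroying or deprovisioning the
// provisioned machines, then optionally runs the stats check and the link
// cleanup.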
func postRunHooks(t *testing.T, options *TestOptions) {
	if options.provisionMachines() {
		for i, cfg := range options.ProvisionConfigs {
			if cfg.Provider.Static {
				infraMachinesDestroyHook(t, options.omniClient.Omni().State(), cfg.Provider.ID, cfg.MachineCount)

				continue
			}

			machineDeprovisionHook(t, options.omniClient, fmt.Sprintf("provisioned%d", i))
		}
	}

	if options.RunStatsCheck {
		t.Log("checking controller stats for the write and read spikes")

		statsLimitsHook(t)
	}

	if options.CleanupLinks {
		require.NoError(t, cleanupLinksFunc(t.Context(), options.omniClient.Omni().State()))
	}
}
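// cleanupLinksFunc tears down and destroys every siderolink.Link resource,
// tolerating links that are already gone.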
func cleanupLinksFunc(ctx context.Context, st state.State) error {
	links, err := safe.ReaderListAll[*siderolink.Link](ctx, st)
	if err != nil {
		return err
	}

	var cancel context.CancelFunc

	ctx, cancel = context.WithTimeout(ctx, time.Minute)
	defer cancel()

	return links.ForEachErr(func(r *siderolink.Link) error {
		err := st.TeardownAndDestroy(ctx, r.Metadata())
		if err != nil && !state.IsNotFoundError(err) {
			return err
		}

		return nil
	})
}
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"context"
@@ -20,7 +22,6 @@ import (
 	"go.uber.org/zap/zaptest"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/siderolabs/omni/client/pkg/client"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 )

@@ -31,7 +32,9 @@
 // 3. Assert that the ClusterMachine resource is deleted - the ClusterMachineTeardownController did not block its deletion despite failing to remove the node from Kubernetes.
 // 4. Wake the control plane back up.
 // 5. Assert that the worker node eventually gets removed from Kubernetes due to node audit.
-func AssertKubernetesNodeAudit(ctx context.Context, st state.State, clusterName string, omniClient *client.Client, options Options) TestFunc {
+func AssertKubernetesNodeAudit(ctx context.Context, clusterName string, options *TestOptions) TestFunc {
+	st := options.omniClient.Omni().State()
+
 	return func(t *testing.T) {
 		if options.FreezeAMachineFunc == nil || options.RestartAMachineFunc == nil {
 			t.Skip("skip the test as FreezeAMachineFunc or RestartAMachineFunc is not set")
@@ -82,7 +85,7 @@ func AssertKubernetesNodeAudit(ctx context.Context, st state.State, clusterName
 			require.NoError(t, options.RestartAMachineFunc(ctx, id))
 		}

-		kubernetesClient := getKubernetesClient(ctx, t, omniClient.Management(), clusterName)
+		kubernetesClient := getKubernetesClient(ctx, t, options.omniClient.Management(), clusterName)

 		logger.Info("assert that the node is removed from Kubernetes due to node audit")
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"context"
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"bufio"
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"context"
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"context"
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"context"
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"context"
143 internal/integration/stats_test.go (new file)
@@ -0,0 +1,143 @@
// Copyright (c) 2025 Sidero Labs, Inc.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.

//go:build integration

package integration_test

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/model"
	"github.com/siderolabs/go-retry/retry"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// statsLimitsHook checks that the metrics don't show any spikes in resource reads/writes or controller wakeups.
// It should only run after the integration test set with Talemu enabled, as the thresholds are tuned for that setup.
// It requires Prometheus to be running on port 9090.
func statsLimitsHook(t *testing.T) {
	for _, tt := range []struct {
		check func(assert *assert.Assertions, value float64)
		name  string
		query string
	}{
		{
			name:  "resource CRUD",
			query: `sum(omni_resource_operations_total{operation=~"create|update", type!="MachineStatusLinks.omni.sidero.dev"})`,
			check: func(assert *assert.Assertions, value float64) {
				limit := float64(12000)

				assert.Lessf(value, limit, "resource CRUD operations were expected to be less than %f. "+
					"If the limit is exceeded not because of a leak but because you added some new resources/controllers, adjust the limit accordingly.", limit)
			},
		},
		{
			name:  "queue length",
			query: `sum(omni_runtime_qcontroller_queue_length)`,
			check: func(assert *assert.Assertions, value float64) { assert.Zero(value) },
		},
		{
			name:  "controller wakeups",
			query: `sum(omni_runtime_controller_wakeups{controller!="MachineStatusLinkController"})`,
			check: func(assert *assert.Assertions, value float64) {
				limit := float64(12000)

				assert.Lessf(value, limit, "controller wakeups were expected to be less than %f. "+
					"If the limit is exceeded not because of a leak but because you added some new resources/controllers, adjust the limit accordingly.", limit)
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			ctx, cancel := context.WithTimeout(t.Context(), time.Second*16)
			defer cancel()

			err := retry.Constant(time.Second * 15).Retry(func() error {
				promClient, err := api.NewClient(api.Config{
					Address: "http://127.0.0.1:9090",
				})
				if err != nil {
					return retry.ExpectedError(err)
				}

				var (
					value    model.Value
					warnings v1.Warnings
				)

				agg := assertionAggregator{}

				v1api := v1.NewAPI(promClient)

				value, warnings, err = v1api.Query(ctx, tt.query, time.Now())
				if err != nil {
					return retry.ExpectedError(err)
				}

				if len(warnings) > 0 {
					return retry.ExpectedErrorf("prometheus query had warnings %#v", warnings)
				}

				assert := assert.New(&agg)

				switch val := value.(type) {
				case *model.Scalar:
					tt.check(assert, float64(val.Value))
				case model.Vector:
					tt.check(assert, float64(val[val.Len()-1].Value))
				default:
					return fmt.Errorf("unexpected value type %s", val.Type())
				}

				if agg.hadErrors {
					return retry.ExpectedError(errors.New(agg.String()))
				}

				return nil
			})

			require.NoError(t, err)
		})
	}
}
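// assertionAggregator satisfies the testing interface expected by assert.New,
// collecting failures instead of failing immediately so that the retry loop
// above can surface them as a single retryable error.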
type assertionAggregator struct {
	errors    map[string]struct{}
	hadErrors bool
}

func (agg *assertionAggregator) Errorf(format string, args ...any) {
	errorString := fmt.Sprintf(format, args...)

	if agg.errors == nil {
		agg.errors = map[string]struct{}{}
	}

	agg.errors[errorString] = struct{}{}
	agg.hadErrors = true
}
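// String renders the aggregated failures as a sorted, bulleted list.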
func (agg *assertionAggregator) String() string {
	lines := make([]string, 0, len(agg.errors))

	for errorString := range agg.errors {
		lines = append(lines, " * "+errorString)
	}

	sort.Strings(lines)

	return strings.Join(lines, "\n")
}
1373 internal/integration/suites_test.go (new file)
(file diff suppressed because it is too large)
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"archive/zip"
@@ -636,9 +638,11 @@ func AssertTalosUpgradeIsCancelable(testCtx context.Context, st state.State, clu

 	events := make(chan state.Event)

-	require.NoError(t, st.WatchKind(ctx, omni.NewClusterMachineStatus(resources.DefaultNamespace, "").Metadata(), events),
-		state.WithLabelQuery(resource.LabelEqual(omni.LabelCluster, clusterName)),
-	)
+	t.Logf("watching for the machines in cluster %q", clusterName)
+
+	require.NoError(t, st.WatchKind(ctx, omni.NewClusterMachineStatus(resources.DefaultNamespace, "").Metadata(), events,
+		state.WatchWithLabelQuery(resource.LabelEqual(omni.LabelCluster, clusterName)),
+	))

 	ids := []string{}

@@ -679,6 +683,8 @@

 		ids = append(ids, res.Metadata().ID())

+		t.Logf("found machine %q, labels %#v", res.Metadata().ID(), res.Metadata().Labels())
+
 		break outer
 	}
 }
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"bytes"
(image file changed; size 1.3 KiB before and after)
@@ -3,7 +3,9 @@
 // Use of this software is governed by the Business Source License
 // included in the LICENSE file.

-package tests
+//go:build integration
+
+package integration_test

 import (
 	"compress/gzip"
@@ -33,8 +35,8 @@ import (

 	"github.com/siderolabs/omni/client/pkg/client"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
-	"github.com/siderolabs/omni/cmd/integration-test/pkg/clientconfig"
 	"github.com/siderolabs/omni/internal/backend/workloadproxy"
+	"github.com/siderolabs/omni/internal/pkg/clientconfig"
 )

 //go:embed testdata/sidero-labs-icon.svg