chore: enable go linting for build tags, fix linting errors

Add the build tags we were using, `integration` and `tools`, to the set of build tags included in linting/formatting by golangci-lint.

Rename the build tag `tools` to `sidero.tools` to avoid colliding with the same-named build tag in the `github.com/johannesboyne/gofakes3` package - otherwise the dependency was failing to compile due to having multiple package names in the same directory.

Fix all the linting errors surfaced by this enablement.

Also, temporarily re-enabled `nolintlint` to find the nolint directives which were no longer necessary and removed them.

Signed-off-by: Utku Ozdemir <utku.ozdemir@siderolabs.com>
This commit is contained in:
Utku Ozdemir 2026-04-29 20:48:57 +02:00
parent 718d61a6b4
commit 2fe716d2c9
No known key found for this signature in database
GPG Key ID: DBD13117B0A14E93
99 changed files with 257 additions and 334 deletions

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-04-18T04:34:01Z by kres 15ff2fd.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -93,7 +93,7 @@ jobs:
make base
- name: Retrieve PR labels
id: retrieve-pr-labels
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # version: v8.0.0
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # version: v9.0.0
with:
retries: "3"
script: |
@ -179,7 +179,7 @@ jobs:
make release-notes
- name: Release
if: startsWith(github.ref, 'refs/tags/')
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # version: v2.6.1
uses: softprops/action-gh-release@b4309332981a82ec1c5618f44dd2e27cc8bfbfda # version: v3.0.0
with:
body_path: _out/RELEASE_NOTES.md
draft: "true"
@ -190,7 +190,7 @@ jobs:
e2e-backups:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-backups')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-backups')
needs:
- integration-talemu
- integration-qemu
@ -251,7 +251,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-backups
path: ${{ github.workspace }}/integration-test
@ -260,7 +260,7 @@ jobs:
e2e-cluster-import:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-cluster-import')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-cluster-import')
needs:
- integration-talemu
- integration-qemu
@ -323,7 +323,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-cluster-import
path: ${{ github.workspace }}/integration-test
@ -332,7 +332,7 @@ jobs:
e2e-forced-removal:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-forced-removal')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-forced-removal')
needs:
- integration-talemu
- integration-qemu
@ -393,7 +393,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-forced-removal
path: ${{ github.workspace }}/integration-test
@ -461,7 +461,7 @@ jobs:
sudo -E make run-integration-test
- name: save-e2e-helm-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: e2e-helm
path: ${{ github.workspace }}/integration-test
@ -469,7 +469,7 @@ jobs:
e2e-minor-talos-upgrade:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-upgrades')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-upgrades')
needs:
- integration-talemu
- integration-qemu
@ -531,7 +531,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-minor-talos-upgrade
path: ${{ github.workspace }}/integration-test
@ -540,7 +540,7 @@ jobs:
e2e-misc-upgrades:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-upgrades') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-misc-upgrades')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-upgrades') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-misc-upgrades')
needs:
- integration-talemu
- integration-qemu
@ -601,7 +601,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-misc-upgrades
path: ${{ github.workspace }}/integration-test
@ -610,7 +610,7 @@ jobs:
e2e-omni-upgrade:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-omni-upgrade')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-omni-upgrade')
needs:
- integration-talemu
- integration-qemu
@ -673,7 +673,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-omni-upgrade
path: ${{ github.workspace }}/integration-test
@ -741,7 +741,7 @@ jobs:
sudo -E make run-integration-test
- name: save-e2e-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: e2e-qemu
path: ${{ github.workspace }}/integration-test
@ -749,7 +749,7 @@ jobs:
e2e-rotate-ca:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-rotate-ca')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-rotate-ca')
needs:
- integration-talemu
- integration-qemu
@ -812,7 +812,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-rotate-ca
path: ${{ github.workspace }}/integration-test
@ -821,7 +821,7 @@ jobs:
e2e-scaling:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-scaling')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-scaling')
needs:
- integration-talemu
- integration-qemu
@ -882,7 +882,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-scaling
path: ${{ github.workspace }}/integration-test
@ -891,7 +891,7 @@ jobs:
e2e-short:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-short')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-short')
needs:
- integration-talemu
- integration-qemu
@ -952,7 +952,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-short
path: ${{ github.workspace }}/integration-test
@ -961,7 +961,7 @@ jobs:
e2e-short-secureboot:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-short-secureboot')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-short-secureboot')
needs:
- integration-talemu
- integration-qemu
@ -1023,7 +1023,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-short-secureboot
path: ${{ github.workspace }}/integration-test
@ -1091,7 +1091,7 @@ jobs:
sudo -E make run-integration-test
- name: save-e2e-talemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: e2e-talemu
path: ${{ github.workspace }}/integration-test
@ -1106,7 +1106,7 @@ jobs:
e2e-templates:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-templates')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-templates')
needs:
- integration-talemu
- integration-qemu
@ -1167,7 +1167,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-templates
path: ${{ github.workspace }}/integration-test
@ -1176,7 +1176,7 @@ jobs:
e2e-upgrades:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-upgrades')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-upgrades')
needs:
- integration-talemu
- integration-qemu
@ -1237,7 +1237,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-upgrades
path: ${{ github.workspace }}/integration-test
@ -1246,7 +1246,7 @@ jobs:
e2e-workload-proxy:
runs-on:
group: large
if: contains(fromJSON(needs.default.outputs.labels), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels), 'integration/e2e-workload-proxy')
if: contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e') || contains(fromJSON(needs.default.outputs.labels || '[]'), 'integration/e2e-workload-proxy')
needs:
- integration-talemu
- integration-qemu
@ -1307,7 +1307,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu-e2e-workload-proxy
path: ${{ github.workspace }}/integration-test
@ -1378,7 +1378,7 @@ jobs:
run: |
find _out -type f -executable > _out/executable-artifacts
- name: save-artifacts
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: artifacts
path: |-
@ -1388,7 +1388,7 @@ jobs:
retention-days: "5"
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test
@ -1463,7 +1463,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-talemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-talemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -69,7 +69,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -68,7 +68,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -69,7 +69,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -69,7 +69,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -68,7 +68,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
concurrency:
group: ${{ github.head_ref || github.run_id }}
@ -67,7 +67,7 @@ jobs:
sudo -E make run-integration-test
- name: save-integration-qemu-artifacts
if: always()
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # version: v7.0.0
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # version: v7.0.1
with:
name: integration-qemu
path: ${{ github.workspace }}/integration-test

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-04-07T14:45:56Z by kres 4e3b74d.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
"on":
workflow_run:
@ -31,7 +31,7 @@ jobs:
if: github.event.workflow_run.conclusion == 'failure' && github.event.workflow_run.event != 'pull_request'
steps:
- name: Slack Notify
uses: slackapi/slack-github-action@af78098f536edbc4de71162a307590698245be95 # version: v3.0.1
uses: slackapi/slack-github-action@03ea5433c137af7c0495bc0cad1af10403fc800c # version: v3.0.2
with:
method: chat.postMessage
payload: |

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-04-07T14:45:56Z by kres 4e3b74d.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
"on":
workflow_run:
@ -36,7 +36,7 @@ jobs:
run: |
echo pull_request_number=$(gh pr view -R ${{ github.repository }} ${{ github.event.workflow_run.head_repository.owner.login }}:${{ github.event.workflow_run.head_branch }} --json number --jq .number) >> $GITHUB_OUTPUT
- name: Slack Notify
uses: slackapi/slack-github-action@af78098f536edbc4de71162a307590698245be95 # version: v3.0.1
uses: slackapi/slack-github-action@03ea5433c137af7c0495bc0cad1af10403fc800c # version: v3.0.2
with:
method: chat.postMessage
payload: |

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T18:20:53Z by kres 7359c62.
version: "2"
@ -9,7 +9,7 @@ run:
modules-download-mode: readonly
issues-exit-code: 1
tests: true
build-tags: []
build-tags: ["integration","sidero.tools"]
# output configuration options
output:

View File

@ -673,6 +673,9 @@ spec:
---
kind: golang.GolangciLint
spec:
buildTags:
- integration
- sidero.tools
depguardExtraRules:
prevent_sync_errgroup:
list-mode: lax

View File

@ -2,7 +2,7 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-04-16T13:44:23Z by kres b6d29bf.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
ARG JS_TOOLCHAIN
ARG TOOLCHAIN=scratch
@ -13,9 +13,9 @@ ARG HELMDOCS_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build,id=omni/root/.cache/go-build --mount=type=cache,target=/go/pkg,id=omni/go/pkg go install github.com/norwoodj/helm-docs/cmd/helm-docs@${HELMDOCS_VERSION} \
&& mv /go/bin/helm-docs /bin/helm-docs
FROM ghcr.io/siderolabs/ca-certificates:v1.12.0 AS image-ca-certificates
FROM ghcr.io/siderolabs/ca-certificates:v1.13.0 AS image-ca-certificates
FROM ghcr.io/siderolabs/fhs:v1.12.0 AS image-fhs
FROM ghcr.io/siderolabs/fhs:v1.13.0 AS image-fhs
# base toolchain image
FROM --platform=${BUILDPLATFORM} ${JS_TOOLCHAIN} AS js-toolchain
@ -26,7 +26,7 @@ ENV GOPATH=/go
ENV PATH=${PATH}:/usr/local/go/bin
# runs markdownlint
FROM docker.io/oven/bun:1.3.11-alpine AS lint-markdown
FROM docker.io/oven/bun:1.3.13-alpine AS lint-markdown
WORKDIR /src
RUN bun i markdownlint-cli@0.48.0 sentences-per-line@0.5.2
COPY .markdownlint.json .

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-04-20T11:44:49Z by kres 4b58472-dirty.
# Generated on 2026-04-29T17:27:17Z by kres 7359c62.
# common variables
@ -24,15 +24,15 @@ TESTPKGS ?= ./...
JS_BUILD_ARGS ?=
PROTOBUF_GO_VERSION ?= 1.36.11
GRPC_GO_VERSION ?= 1.6.1
GRPC_GATEWAY_VERSION ?= 2.28.0
GRPC_GATEWAY_VERSION ?= 2.29.0
VTPROTOBUF_VERSION ?= 0.6.0
GOIMPORTS_VERSION ?= 0.43.0
GOIMPORTS_VERSION ?= 0.44.0
GOMOCK_VERSION ?= 0.6.0
DEEPCOPY_VERSION ?= v0.5.8
GOLANGCILINT_VERSION ?= v2.11.4
GOFUMPT_VERSION ?= v0.9.2
GO_VERSION ?= 1.26.2
DIS_VULNCHECK_VERSION ?= v0.0.0-20260408104044-a7a2dc044240
DIS_VULNCHECK_VERSION ?= v0.0.0-20260409114749-05440f84fe69
GO_BUILDFLAGS ?=
GO_BUILDTAGS ?= memory.counters,libc.memexpvar,
GO_LDFLAGS ?=
@ -88,7 +88,7 @@ COMMON_ARGS += --build-arg=GOFUMPT_VERSION="$(GOFUMPT_VERSION)"
COMMON_ARGS += --build-arg=DIS_VULNCHECK_VERSION="$(DIS_VULNCHECK_VERSION)"
COMMON_ARGS += --build-arg=TESTPKGS="$(TESTPKGS)"
COMMON_ARGS += --build-arg=HELMDOCS_VERSION="$(HELMDOCS_VERSION)"
JS_TOOLCHAIN ?= docker.io/node:24.14.1-alpine
JS_TOOLCHAIN ?= docker.io/node:24.15.0-alpine
TOOLCHAIN ?= docker.io/golang:1.26-alpine
# extra variables

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2026-03-10T14:06:17Z by kres 3328d87.
# Generated on 2026-04-29T18:20:53Z by kres 7359c62.
version: "2"
@ -9,7 +9,7 @@ run:
modules-download-mode: readonly
issues-exit-code: 1
tests: true
build-tags: []
build-tags: ["integration","sidero.tools"]
# output configuration options
output:

View File

@ -2,7 +2,6 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//nolint:revive
package specs
import (

View File

@ -2,7 +2,6 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//nolint:revive
package specs
import (

View File

@ -2,7 +2,6 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//nolint:revive
package specs
import (

View File

@ -63,7 +63,8 @@ type Context struct {
}
// BuildContext builds the import context by collecting information from the existing Talos cluster.
// nolint:gocyclo,cyclop
//
//nolint:gocyclo,cyclop
func BuildContext(ctx context.Context, input Input, omniState state.State, imageFactoryClient ImageFactoryClient, talosClient TalosClient) (*Context, error) {
input.logf("discovering Talos cluster state...")
@ -472,7 +473,6 @@ func (c *Context) importClusterToOmni(ctx context.Context) error {
return nil
}
//nolint:gocognit
func (c *Context) validate(ctx context.Context) error {
c.input.logf("validating cluster status...")

View File

@ -19,7 +19,7 @@ const Namespace = resources.DefaultNamespace
const CounterNamespace = resources.MetricsNamespace
func init() {
registry.MustRegisterResource(ConnectionParamsType, &ConnectionParams{}) //nolint:staticcheck
registry.MustRegisterResource(ConnectionParamsType, &ConnectionParams{})
registry.MustRegisterResource(ConfigType, &Config{})
registry.MustRegisterResource(LinkType, &Link{})
registry.MustRegisterResource(PendingMachineType, &PendingMachine{})

View File

@ -44,7 +44,7 @@ will only work if the cluster is locked and tainted as "importing"`,
return fmt.Errorf("failed to abort import operation for cluster %q: %w", clusterID, err)
}
fmt.Fprintf(os.Stderr, "import operation was aborted successfully for cluster %q\n", clusterID) //nolint:errcheck
fmt.Fprintf(os.Stderr, "import operation was aborted successfully for cluster %q\n", clusterID)
return nil
})

View File

@ -200,7 +200,6 @@ func (c *Config) Merge(additionalConfigPath string) ([]Rename, error) {
return renames, nil
}
//nolint:gocognit
func defaultPath(readOnly bool) (string, error) {
path := os.Getenv(OmniConfigEnvVar)
if path != "" {

View File

@ -232,7 +232,7 @@ func addEditingComment(in string) string {
fmt.Fprintln(&sb, "# Edit Failed:")
for _, line := range lines {
fmt.Fprintf(&sb, "# %s\n", line) //nolint:errcheck
fmt.Fprintf(&sb, "# %s\n", line)
}
return sb.String()

View File

@ -43,7 +43,7 @@ To get a list of all available resource definitions, issue 'omnictl get rd'`,
},
}
//nolint:gocognit,gocyclo,cyclop,maintidx
//nolint:gocognit,gocyclo,cyclop
func getResources(args []string) func(ctx context.Context, client *client.Client, _ access.ServerInfo) error {
return func(ctx context.Context, client *client.Client, _ access.ServerInfo) error {
st := client.Omni().State()

View File

@ -234,7 +234,7 @@ func checkNotifications(ctx context.Context, st state.State, info ServerInfo) er
prefix = "[UNKNOWN]"
}
fmt.Fprintf(os.Stderr, "%s %s: %s\n", prefix, spec.Title, spec.Body) //nolint:errcheck
fmt.Fprintf(os.Stderr, "%s %s: %s\n", prefix, spec.Title, spec.Body)
}
return nil

View File

@ -57,8 +57,6 @@ func New() *Reporter {
}
// Report reports an update to the reporter.
//
//nolint:gocyclo
func (r *Reporter) Report(update Update) {
line := strings.TrimSpace(update.Message)
// replace tabs with spaces to get consistent output length

View File

@ -133,7 +133,7 @@ func TestGenerateConfigs(t *testing.T) {
_, err = clientcmd.Load(kubeconfig)
require.NoError(t, err)
updatedClusterStatus, err := safe.ReaderGetByID[*omni.ClusterStatus](ctx, st.Default(), clusterName) // nolint:govet
updatedClusterStatus, err := safe.ReaderGetByID[*omni.ClusterStatus](ctx, st.Default(), clusterName) //nolint:govet
require.NoError(t, err)
_, taintBreakGlass := updatedClusterStatus.Metadata().Labels().Get(omni.LabelClusterTaintedByBreakGlass)

View File

@ -21,7 +21,6 @@ type AuthServer = authServer
// ManagementServerOption configures a test management server.
type ManagementServerOption func(*ManagementServer)
//nolint:revive
func NewManagementServer(st state.State, imageFactoryClient *imagefactory.Client, logger *zap.Logger,
enableBreakGlassConfigs bool, kubernetesRuntime KubernetesRuntime, talosconfigProvider TalosconfigProvider,
opts ...ManagementServerOption,

View File

@ -166,7 +166,6 @@ func (s *managementServer) syncSSA(
return fmt.Errorf("failed to send manifest sync response: %w", err)
}
//nolint:exhaustive
switch r.Action {
case ssa.CreatedAction, ssa.ConfiguredAction:
if !req.DryRun && !resp.Skipped {

View File

@ -1009,7 +1009,6 @@ func (s *managementServer) triggerManifestResync(ctx context.Context, requestCon
return nil
}
//nolint:unparam
func (s *managementServer) authCheckGRPC(ctx context.Context, opts ...auth.CheckOption) (auth.CheckResult, error) {
authCheckResult, err := auth.Check(ctx, opts...)
if errors.Is(err, auth.ErrUnauthenticated) {

View File

@ -269,7 +269,6 @@ func (backend *TalosBackend) BuildError(bool, error) ([]byte, error) {
return nil, nil
}
//nolint:unparam
func setHeaderData(ctx context.Context, md metadata.MD, k string, v ...string) {
if len(v) == 0 {
return

View File

@ -217,7 +217,6 @@ func (s *managementServer) collectLogs(machineID string) *collectors.Collector {
func (s *managementServer) collectClusterResources(ctx context.Context, cluster string) ([]resource.Resource, error) {
st := s.omniState
//nolint:prealloc
var resources []resource.Resource
clusterQuery := []state.ListOption{

View File

@ -87,9 +87,7 @@ func (Client) DevMode() bool {
}
// RestrictAdditionalIdTokenScopes allows specifying which custom scopes shall be asserted into the id_token.
//
//nolint:staticcheck // Id, not ID, because this is an implementation of an interface from a library.
func (Client) RestrictAdditionalIdTokenScopes() func(scopes []string) []string { //nolint:revive
func (Client) RestrictAdditionalIdTokenScopes() func(scopes []string) []string {
return func(scopes []string) []string {
return scopes
}

View File

@ -74,7 +74,7 @@ func WatchLegacy(ctx context.Context, st state.State, md resource.Metadata, out
return nil
}
var listOpts []state.ListOption //nolint:prealloc
var listOpts []state.ListOption
for _, query := range queries {
listOpts = append(listOpts, state.WithLabelQuery(resource.RawLabelQuery(query)))

View File

@ -30,8 +30,6 @@ type ClusterDestroyStatusController = qtransform.QController[*omni.Cluster, *omn
const ClusterDestroyStatusControllerName = "ClusterDestroyStatusController"
// NewClusterDestroyStatusController initializes ClusterDestroyStatusController.
//
//nolint:gocognit,gocyclo,cyclop
func NewClusterDestroyStatusController() *ClusterDestroyStatusController {
return qtransform.NewQController(
qtransform.Settings[*omni.Cluster, *omni.ClusterDestroyStatus]{

View File

@ -869,7 +869,7 @@ func findBackups(ctx context.Context, t *testing.T, st state.State, sf store.Fac
}
func toSlice(t *testing.T, it iter.Seq2[etcdbackup.Info, error]) []etcdbackup.Info {
var result []etcdbackup.Info //nolint:prealloc
var result []etcdbackup.Info
for v, err := range it {
require.NoError(t, err)

View File

@ -42,8 +42,7 @@ func (c *TalosImageClient) ListImagesOnNode(ctx context.Context, cluster, node s
return nil, fmt.Errorf("failed to get talos client for node %q: %w", node, err)
}
//nolint:staticcheck
imageListStream, err := talosCli.ImageList(ctx, common.ContainerdNamespace_NS_CRI)
imageListStream, err := talosCli.ImageList(ctx, common.ContainerdNamespace_NS_CRI) //nolint:staticcheck
if err != nil {
return nil, fmt.Errorf("failed to list images: %w", err)
}
@ -80,8 +79,7 @@ func (c *TalosImageClient) PullImageToNode(ctx context.Context, cluster, node, i
return fmt.Errorf("failed to get talos client for node %q: %w", node, err)
}
//nolint:staticcheck
if err = talosCli.ImagePull(ctx, common.ContainerdNamespace_NS_CRI, image); err != nil {
if err = talosCli.ImagePull(ctx, common.ContainerdNamespace_NS_CRI, image); err != nil { //nolint:staticcheck
return fmt.Errorf("failed to pull image %s: %w", image, err)
}

View File

@ -49,8 +49,6 @@ func NewInfraMachineController(installEventCh <-chan resource.ID) *InfraMachineC
}
// Settings implements the controller.QController interface.
//
//nolint:dupl
func (ctrl *InfraMachineController) Settings() controller.QSettings {
return controller.QSettings{
Inputs: []controller.Input{

View File

@ -30,7 +30,8 @@ type InfraProviderCleanupController = cleanup.Controller[*infra.Provider]
// NewInfraProviderCleanupController returns a new InfraProviderCleanup controller.
// This controller removes infra.ProviderStatus and infra.ProviderHealthStatus resources reported by the provider.
// nolint:gocognit,gocyclo,cyclop
//
//nolint:gocognit,gocyclo,cyclop
func NewInfraProviderCleanupController() *InfraProviderCleanupController {
return cleanup.NewController(
cleanup.Settings[*infra.Provider]{

View File

@ -13,7 +13,6 @@ import (
type ReaderLimiter = readerLimiter
//nolint:revive
func NewReaderLimiter(rdr io.ReadCloser, l *rate.Limiter) *ReaderLimiter {
return &readerLimiter{rdr: rdr, l: l}
}

View File

@ -87,8 +87,6 @@ type ReconciliationContext struct {
}
// BuildReconciliationContext is the COSI reader dependent method to build the reconciliation context.
//
//nolint:gocognit
func BuildReconciliationContext(ctx context.Context, r controller.Reader, machineSet *omni.MachineSet) (*ReconciliationContext, error) {
clusterName, ok := machineSet.Metadata().Labels().Get(omni.LabelCluster)
if !ok {

View File

@ -19,7 +19,6 @@ import (
"github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/omni/internal/machineset"
)
//nolint:maintidx
func TestReconciliationContext(t *testing.T) {
t.Parallel()

View File

@ -97,7 +97,7 @@ func (c Candidate) validateKubernetesCARotation(ctx context.Context, lbConfig *o
if err != nil {
return false, err
}
defer k8sClient.Close() //nolint:errcheck
defer k8sClient.Close()
clientset := k8sClient.Clientset()

View File

@ -162,7 +162,7 @@ func (ctrl *ClusterManifestsStatusController) Reconcile(ctx context.Context, log
return ctrl.reconcileRunning(ctx, r, logger, res, status)
}
// nolint:gocyclo,cyclop,gocognit
//nolint:gocyclo,cyclop,gocognit
func (ctrl *ClusterManifestsStatusController) reconcileRunning(ctx context.Context, r controller.ReaderWriter, logger *zap.Logger,
cluster *omni.Cluster, clusterKubernetesManifestsStatus *omni.ClusterKubernetesManifestsStatus,
) error {
@ -318,7 +318,7 @@ func (ctrl *ClusterManifestsStatusController) reconcileRunning(ctx context.Conte
return errs
}
// nolint:gocyclo,cyclop,gocognit
//nolint:gocyclo,cyclop,gocognit
func (ctrl *ClusterManifestsStatusController) updateStatus(
ctx context.Context,
r controller.ReaderWriter,
@ -543,7 +543,6 @@ func (ctrl *ClusterManifestsStatusController) sync(
zap.String("action", string(change.Action)),
)
// nolint:exhaustive
switch change.Action {
case ssa.CreatedAction, ssa.ConfiguredAction:
requeue = true

View File

@ -240,7 +240,6 @@ func testMachineRequestSetStatusReconcile(ctx context.Context, t *testing.T, st
require.True(t, requests.Len() == 0)
}
//nolint:gocognit
func reconcileLabels(ctx context.Context, st state.State, ready chan<- struct{}) error {
ch := make(chan state.Event, 64)

View File

@ -30,8 +30,6 @@ type MachineSetDestroyStatusController = qtransform.QController[*omni.MachineSet
const MachineSetDestroyStatusControllerName = "MachineSetDestroyStatusController"
// NewMachineSetDestroyStatusController initializes MachineSetDestroyStatusController.
//
//nolint:gocognit
func NewMachineSetDestroyStatusController() *MachineSetDestroyStatusController {
return qtransform.NewQController(
qtransform.Settings[*omni.MachineSet, *omni.MachineSetDestroyStatus]{

View File

@ -64,8 +64,6 @@ func NewMachineStatusController(imageFactoryClient ImageFactoryClient, kernelArg
}
// Settings implements controller.QController interface.
//
//nolint:dupl
func (ctrl *MachineStatusController) Settings() controller.QSettings {
return controller.QSettings{
Inputs: []controller.Input{

View File

@ -68,8 +68,6 @@ type ClusterMachineConfigStatusController struct {
}
// NewClusterMachineConfigStatusController initializes ClusterMachineConfigStatusController.
//
//nolint:gocognit,gocyclo,cyclop,maintidx
func NewClusterMachineConfigStatusController(imageFactoryHost, talosRegistry string) *ClusterMachineConfigStatusController {
ongoingResets := &ongoingResets{
statuses: map[string]*resetStatus{},
@ -543,7 +541,7 @@ func logClose(c io.Closer, logger *zap.Logger, additional string) {
}
}
//nolint:gocyclo,cyclop,gocognit,maintidx
//nolint:gocyclo,cyclop
func (ctrl *ClusterMachineConfigStatusController) reset(
ctx context.Context,
logger *zap.Logger,

View File

@ -65,7 +65,7 @@ func NewStatusController(imageFactoryHost, talosRegistry string, talosClientFact
return ctrl
}
//nolint:gocyclo,cyclop,maintidx
//nolint:gocyclo,cyclop
func (ctrl *StatusController) transform(ctx context.Context, r controller.Reader, logger *zap.Logger, ms *omni.MachineStatus, status *omni.MachineUpgradeStatus) error {
helpers.SyncLabels(ms, status, omni.LabelCluster, omni.LabelMachineSet)

View File

@ -73,7 +73,6 @@ func testReconcile(ctx context.Context, t *testing.T, st state.State, cleanupCh
data := string(buffer.Data())
//nolint:lll
assert.Equal(machineConfig, data)
},
)

View File

@ -136,8 +136,6 @@ type schematicConfigurationHelper struct {
}
// Reconcile implements controller.QController interface.
//
//nolint:gocognit,gocyclo,cyclop
func (helper *schematicConfigurationHelper) reconcile(
ctx context.Context,
r controller.ReaderWriter,
@ -353,7 +351,6 @@ func newMachineCustomization(ctx context.Context, r controller.Reader, ms *omni.
return mc, nil
}
//nolint:recvcheck
type machineCustomization struct {
machineStatus *omni.MachineStatus
machineExtensions *omni.MachineExtensions

View File

@ -304,7 +304,6 @@ func (s *Rotator) createInitialStage(currentPhase specs.SecretRotationSpec_Phase
}
}
//nolint:gocognit
func (s *Rotator) addRotationStage(previousPhase, currentPhase specs.SecretRotationSpec_Phase) func(
ctx context.Context,
logger *zap.Logger,

View File

@ -44,7 +44,7 @@ import (
"github.com/siderolabs/omni/internal/pkg/constants"
)
//nolint:maintidx,gocognit
//nolint:maintidx
func Test_TalosCARotation(t *testing.T) {
t.Parallel()

View File

@ -42,7 +42,7 @@ type Controller struct {
// NewSecretsController instantiates the secrets' controller.
//
//nolint:gocognit,gocyclo,cyclop,maintidx
//nolint:gocognit,gocyclo,cyclop
func NewSecretsController(etcdBackupStoreFactory store.Factory) *Controller {
ctrl := &Controller{}

View File

@ -163,7 +163,7 @@ func NewStatusController() *TalosUpgradeStatusController {
return ctrl
}
//nolint:gocyclo,cyclop,gocognit
//nolint:gocyclo,cyclop
func (ctrl *TalosUpgradeStatusController) reconcileStatus(ctx context.Context, r controller.ReaderWriter,
clusterMachines safe.List[*omni.ClusterMachine], outdatedMachines *outdatedMachines,
cluster *omni.Cluster, upgradeStatus *omni.TalosUpgradeStatus,

View File

@ -170,7 +170,6 @@ func (suite *MigrationSuite) TestMoveInfraProviderAnnotationsToLabels() {
suite.False(machine1AnnotationOk)
}
//nolint:dupl
func (suite *MigrationSuite) TestDropSchematicConfigFinalizerFromClusterMachines() {
ctx, cancel := context.WithTimeout(suite.T().Context(), 10*time.Second)
defer cancel()
@ -207,7 +206,6 @@ func (suite *MigrationSuite) TestDropSchematicConfigFinalizerFromClusterMachines
suite.True(cm3VersionBefore.Equal(cm3Migrated.Metadata().Version()), "expected cm3 to be left untouched")
}
//nolint:dupl
func (suite *MigrationSuite) TestDropTalosUpgradeStatusFinalizersFromSchematicConfigs() {
ctx, cancel := context.WithTimeout(suite.T().Context(), 10*time.Second)
defer cancel()

View File

@ -375,8 +375,6 @@ func verbToRole(verb state.Verb) role.Role {
}
// filterAccess provides a filter to exclude some resources and operations from external sources.
//
//nolint:cyclop,gocyclo
func filterAccess(ctx context.Context, access state.Access) error {
if actor.ContextIsInternalActor(ctx) {
return nil

View File

@ -65,7 +65,7 @@ type KubernetesUsage struct {
state state.State
kubeRuntime KubernetesClientGetter
factory informers.SharedInformerFactory
ctx context.Context // nolint:containedctx
ctx context.Context //nolint:containedctx
stopCh chan struct{}
logger *zap.Logger
ctxCancel context.CancelFunc

View File

@ -503,7 +503,6 @@ func TestProxyRuntime_WatchSearchForTransition(t *testing.T) {
testWatch(t, msgs, expected, true, runtime.WithSearchFor([]string{"cluster1"}))
}
//nolint:unparam
func watchResponse(id int, cluster, sortByField string, count int) runtime.WatchResponse {
return cosi.NewResponse(
fmt.Sprintf("id%d", id),

View File

@ -210,7 +210,7 @@ func (s *Server) Run(ctx context.Context) error {
return err
}
serverOptions, err := s.buildServerOptions(ctx) //nolint:contextcheck
serverOptions, err := s.buildServerOptions(ctx)
if err != nil {
return err
}

View File

@ -57,7 +57,6 @@ import (
resapi "github.com/siderolabs/omni/client/api/omni/resources"
"github.com/siderolabs/omni/client/api/omni/specs"
"github.com/siderolabs/omni/client/pkg/access"
pkgaccess "github.com/siderolabs/omni/client/pkg/access"
"github.com/siderolabs/omni/client/pkg/client"
managementcli "github.com/siderolabs/omni/client/pkg/client/management"
"github.com/siderolabs/omni/client/pkg/constants"
@ -81,12 +80,10 @@ import (
// It uses the root client (automation SA) to create new service accounts with the specified role,
// then returns a client authenticated as that SA. Clients are cached by role.
type testClientFactory struct {
endpoint string
serviceAccountKey string
rootCli *client.Client
mu sync.Mutex
clients map[role.Role]*client.Client
rootCli *client.Client
clients map[role.Role]*client.Client
endpoint string
mu sync.Mutex
}
func newTestClientFactory(endpoint string, rootCli *client.Client) *testClientFactory {
@ -336,7 +333,7 @@ func AssertServiceAccountAPIFlow(testCtx context.Context, cli *client.Client) Te
assert.NoError(t, err)
rtestutils.AssertResources(testCtx, t, cli.Omni().State(), []string{
name + pkgaccess.ServiceAccountNameSuffix,
name + access.ServiceAccountNameSuffix,
}, func(res *authres.ServiceAccountStatus, assert *assert.Assertions) {
assert.Equal(string(role.Admin), res.TypedSpec().Value.Role)
assert.Equal(2, len(res.TypedSpec().Value.PublicKeys))
@ -392,7 +389,7 @@ func newServiceAccountClient(cli *client.Client, name string) (*client.Client, s
// generate a new PGP key with long lifetime
comment := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
serviceAccountEmail := name + pkgaccess.ServiceAccountNameSuffix
serviceAccountEmail := name + access.ServiceAccountNameSuffix
key, err := pgp.GenerateKey(name, comment, serviceAccountEmail, auth.ServiceAccountMaxAllowedLifetime)
if err != nil {
@ -849,7 +846,9 @@ func AssertResourceAuthz(rootCtx context.Context, rootCli *client.Client, client
importedClusterSecret := omni.NewImportedClusterSecrets(cluster.Metadata().ID())
testCases := []resourceAuthzTestCase{
testCases := make([]resourceAuthzTestCase, 0, 139)
testCases = append(testCases, []resourceAuthzTestCase{
{
resource: accessPolicy,
allowedVerbSet: allVerbsSet,
@ -963,7 +962,7 @@ func AssertResourceAuthz(rootCtx context.Context, rootCli *client.Client, client
resource: kubernetesManifest,
allowedVerbSet: allVerbsSet,
},
}
}...)
// read-only resources
@ -1202,7 +1201,7 @@ func AssertResourceAuthz(rootCtx context.Context, rootCli *client.Client, client
allowedVerbSet: readOnlyVerbSet,
},
{
resource: siderolink.NewConnectionParams(uuid.New().String()),
resource: siderolink.NewConnectionParams(uuid.New().String()), //nolint:staticcheck // deprecated resource is still registered and needs authz coverage.
allowedVerbSet: readOnlyVerbSet,
},
{
@ -1578,6 +1577,7 @@ var (
const grpcMetadataPrefix = "Grpc-Metadata-"
//nolint:gocognit,maintidx
func AssertFrontendResourceAPI(ctx context.Context, rootCli *client.Client, serviceAccountKey, httpEndpoint, clusterName string) TestFunc {
return func(t *testing.T) {
sa, err := serviceaccount.Decode(serviceAccountKey)
@ -1601,21 +1601,20 @@ func AssertFrontendResourceAPI(ctx context.Context, rootCli *client.Client, serv
Method: request.URL.Path[len("/api"):],
}
payloadJSON, err := json.Marshal(payload)
if err != nil {
return fmt.Errorf("failed to encode payload: %w", err)
payloadJSON, marshalErr := json.Marshal(payload)
if marshalErr != nil {
return fmt.Errorf("failed to encode payload: %w", marshalErr)
}
request.Header.Set(grpcMetadataPrefix+message.PayloadHeaderKey, string(payloadJSON))
signature, err := key.Sign(payloadJSON)
if err != nil {
return fmt.Errorf("failed to sign: %w", err)
signature, signErr := key.Sign(payloadJSON)
if signErr != nil {
return fmt.Errorf("failed to sign: %w", signErr)
}
signatureBase64 := base64.StdEncoding.EncodeToString(signature)
//nolint:canonicalheader
request.Header.Set(grpcMetadataPrefix+message.SignatureHeaderKey,
fmt.Sprintf("%s %s %s %s", message.SignatureVersionV1, email, key.Fingerprint(), signatureBase64))
@ -1642,8 +1641,8 @@ func AssertFrontendResourceAPI(ctx context.Context, rootCli *client.Client, serv
require.NoError(t, err)
for _, tt := range []struct {
requestBody []byte
method string
requestBody []byte
expectedStatus int
backend common.Runtime
}{
@ -1800,7 +1799,7 @@ func AssertFrontendResourceAPI(ctx context.Context, rootCli *client.Client, serv
fullURL, err := url.JoinPath(httpEndpoint, "api", tt.method)
require.NoError(t, err)
request, err := http.NewRequestWithContext(ctx, "POST",
request, err := http.NewRequestWithContext(ctx, http.MethodPost,
fullURL, bytes.NewBuffer(tt.requestBody),
)
require.NoError(t, err)
@ -1812,6 +1811,7 @@ func AssertFrontendResourceAPI(ctx context.Context, rootCli *client.Client, serv
request.Header.Set(grpcMetadataPrefix+"Cluster", clusterName)
case common.Runtime_Kubernetes:
request.Header.Set(grpcMetadataPrefix+"Cluster", clusterName)
case common.Runtime_Omni:
}
if sign {

View File

@ -43,7 +43,6 @@ func AssertEtcdManualBackupIsCreated(testCtx context.Context, st state.State, cl
// We can't use number of backups here because two backups can happen at the same second, where
// the newer will overwrite the older.
bs, err := safe.ReaderGetByID[*omni.EtcdBackupStatus](testCtx, st, clusterName)
if err == nil {
start = bs.TypedSpec().Value.LastBackupTime.AsTime()
} else if !state.IsNotFoundError(err) {

View File

@ -89,6 +89,7 @@ func AssertBlockClusterAndTalosAPIAndKubernetesShouldBeReady(
clusterName, expectedTalosVersion, expectedKubernetesVersion string,
) []subTest { //nolint:nolintlint,revive
omniState := options.omniClient.Omni().State()
return AssertBlockClusterShouldBeReady(ctx, options, clusterName, expectedTalosVersion).
Append(AssertBlockProxyAPIAccessShouldWork(ctx, options, clusterName)...).
Append(

View File

@ -74,6 +74,7 @@ func AssertDownloadUsingCLI(testCtx context.Context, client *client.Client, omni
output := filepath.Join(t.TempDir(), image.Metadata().ID())
stdout, stderr, err := runCmd(
testCtx,
omnictlPath,
httpEndpoint,
key, "download",
@ -95,12 +96,13 @@ func AssertDownloadUsingCLI(testCtx context.Context, client *client.Client, omni
}
}
func runCmd(path, endpoint, key string, args ...string) (bytes.Buffer, bytes.Buffer, error) {
func runCmd(ctx context.Context, path, endpoint, key string, args ...string) (bytes.Buffer, bytes.Buffer, error) {
var stdout, stderr bytes.Buffer
args = append([]string{"--insecure-skip-tls-verify"}, args...)
cmd := exec.Command(
cmd := exec.CommandContext(
ctx,
path,
args...,
)
@ -113,7 +115,7 @@ func runCmd(path, endpoint, key string, args ...string) (bytes.Buffer, bytes.Buf
if err != nil {
return stdout, stderr, fmt.Errorf("failed to create temp home dir: %w", err)
}
defer os.RemoveAll(tempHomeDir)
defer os.RemoveAll(tempHomeDir) //nolint:errcheck
cmd.Env = []string{
fmt.Sprintf("HOME=%s", tempHomeDir),
@ -121,7 +123,8 @@ func runCmd(path, endpoint, key string, args ...string) (bytes.Buffer, bytes.Buf
fmt.Sprintf("OMNI_SERVICE_ACCOUNT_KEY=%s", key),
}
if err := cmd.Start(); err != nil {
err = cmd.Start()
if err != nil {
return stdout, stderr, err
}
@ -160,21 +163,21 @@ func AssertUserCLI(testCtx context.Context, client *client.Client, omnictlPath,
key := createServiceAccount(testCtx, t, client, name, role.Admin)
stdout, stderr, err := runCmd(omnictlPath, httpEndpoint, key, "user", "create", "a@a.com", "--role", "Admin")
stdout, stderr, err := runCmd(testCtx, omnictlPath, httpEndpoint, key, "user", "create", "a@a.com", "--role", "Admin")
require.NoErrorf(t, err, "failed to create user. stdout: %q | stderr: %q", stdout.String(), stderr.String())
stdout, stderr, err = runCmd(omnictlPath, httpEndpoint, key, "user", "list")
stdout, stderr, err = runCmd(testCtx, omnictlPath, httpEndpoint, key, "user", "list")
require.NoErrorf(t, err, "failed to list users. stdout: %q | stderr: %q", stdout.String(), stderr.String())
require.Contains(t, stdout.String(), "a@a.com")
stdout, stderr, err = runCmd(omnictlPath, httpEndpoint, key, "user", "set-role", "--role", "Reader", "a@a.com")
stdout, stderr, err = runCmd(testCtx, omnictlPath, httpEndpoint, key, "user", "set-role", "--role", "Reader", "a@a.com")
require.NoErrorf(t, err, "failed to set role. stdout: %q | stderr: %q", stdout.String(), stderr.String())
stdout, stderr, err = runCmd(omnictlPath, httpEndpoint, key, "user", "delete", "a@a.com")
stdout, stderr, err = runCmd(testCtx, omnictlPath, httpEndpoint, key, "user", "delete", "a@a.com")
require.NoErrorf(t, err, "failed to delete user. stdout: %q | stderr: %q", stdout.String(), stderr.String())
stdout, stderr, err = runCmd(omnictlPath, httpEndpoint, key, "user", "list")
stdout, stderr, err = runCmd(testCtx, omnictlPath, httpEndpoint, key, "user", "list")
require.NoErrorf(t, err, "failed to list users. stdout: %q | stderr: %q", stdout.String(), stderr.String())
require.NotContains(t, stdout.String(), "a@a.com")

View File

@ -1041,10 +1041,10 @@ func updateMachineClassMachineSets(ctx context.Context, t *testing.T, st state.S
cps.Metadata().Labels().Set(omni.LabelCluster, options.Name)
cps.Metadata().Labels().Set(omni.LabelClusterMachine, machineID)
return cps.TypedSpec().Value.SetUncompressedData([]byte(fmt.Sprintf(`machine:
return cps.TypedSpec().Value.SetUncompressedData(fmt.Appendf(nil, `machine:
kubelet:
extraArgs:
node-labels: %s=%s`, nodeLabel, machineID)))
node-labels: %s=%s`, nodeLabel, machineID))
})
}
}

View File

@ -172,16 +172,6 @@ func talosNodeIPs(ctx context.Context, talosState state.State) ([]string, error)
return nodeIPs, nil
}
//nolint:govet
type testGroup struct {
Name string
Description string
Parallel bool
MachineClaim int
Subtests []subTest
Finalizer func(t *testing.T)
}
//nolint:govet
type subTest struct {
Name string
@ -286,9 +276,8 @@ type MachineProviderConfig struct {
// TestOptions constains all common data that might be required to run the tests.
type TestOptions struct {
Options
omniClient *client.Client
serviceAccountKey string
omniClient *client.Client
machineSemaphore *semaphore.Weighted
Options
serviceAccountKey string
}

View File

@ -90,6 +90,7 @@ func checkExtensions(ctx context.Context, talosClient *talosclient.Client, exten
if i < 0 {
return fmt.Errorf("extensions/order mismatch: expected %q to be a subsequence of %q", extensions, collectedExtensions)
}
pos += i + 1
}

View File

@ -35,8 +35,10 @@ import (
func testClusterImport(t *testing.T, options *TestOptions) {
t.Parallel()
var clusterID string
var clusterNodes []string
var (
clusterID string
clusterNodes []string
)
if options.ImportedClusterStatePath != "" {
f, err := os.ReadFile(options.ImportedClusterStatePath)
@ -80,6 +82,7 @@ func testImport(t *testing.T, options *TestOptions, clusterID string, clusterNod
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Minute)
defer cancel()
logger := zaptest.NewLogger(t)
omniState := options.omniClient.Omni().State()
@ -160,6 +163,7 @@ func testImportAbort(t *testing.T, options *TestOptions, clusterID string, clust
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Minute)
defer cancel()
logger := zaptest.NewLogger(t)
omniState := options.omniClient.Omni().State()

View File

@ -32,8 +32,6 @@ import (
)
// machineProvisionHook creates a machine request set and waits until all requests are fulfilled.
//
//nolint:gocognit
func machineProvisionHook(t *testing.T, client *client.Client, cfg MachineProvisionConfig, machineRequestSetName,
talosVersion string,
) {

View File

@ -67,7 +67,7 @@ var (
anotherKubernetesVersion string
expectedMachines int
// provisioning flags
// provisioning flags.
provisionMachinesCount int
infraProvider string
providerData string
@ -445,10 +445,10 @@ func runOmni(t *testing.T) (string, error) {
var logger *zap.Logger
switch {
case omniLogOutput == "inline":
switch omniLogOutput {
case "inline":
logger = zaptest.NewLogger(t)
case omniLogOutput == "":
case "":
logger = zap.NewNop()
t.Log("discard Omni log")

View File

@ -35,6 +35,7 @@ import (
// 5. Assert that the worker node eventually gets removed from Kubernetes due to node audit.
func AssertKubernetesNodeAudit(testCtx context.Context, clusterName string, options *TestOptions) TestFunc {
st := options.omniClient.Omni().State()
return func(t *testing.T) {
ctx := kubernetes.WrapContext(testCtx, t)

View File

@ -228,8 +228,10 @@ func AssertKubernetesUpgradeFlow(testCtx context.Context, st state.State, manage
for _, status := range r.TypedSpec().Value.Nodes {
if status.Ready && status.KubeletVersion == kubernetesVersion {
upgradedKubeletCount++
continue
}
pendingKubeletCount++
}
@ -281,7 +283,7 @@ func KubernetesBootstrapManifestSync(testCtx context.Context, managementClient *
t.Logf("running bootstrap manifest sync for %q", clusterName)
syncHandler := func(result *managementpb.KubernetesSyncManifestResponse) error {
syncHandler := func(result *managementpb.KubernetesSyncManifestResponse) error { //nolint:unparam
switch result.ResponseType { //nolint:exhaustive
case managementpb.KubernetesSyncManifestResponse_MANIFEST:
if result.Skipped {
@ -581,6 +583,7 @@ func AssertKubernetesDeploymentHasRunningPods(testCtx context.Context, managemen
}
}
//nolint:gocognit,maintidx
func AssertKubernetesManifestsSync(testCtx context.Context, rootClient *client.Client, clusterName string) TestFunc {
type params struct {
Name string
@ -750,9 +753,9 @@ spec:
})
require.NoError(t, retry.Constant(time.Minute).RetryWithContext(ctx, func(ctx context.Context) error {
deployment, err := kubeClient.AppsV1().Deployments(corev1.NamespaceDefault).Get(ctx, "nginx", metav1.GetOptions{})
if err != nil {
return retry.ExpectedErrorf("failed to get deployment: %q", err)
deployment, getErr := kubeClient.AppsV1().Deployments(corev1.NamespaceDefault).Get(ctx, "nginx", metav1.GetOptions{})
if getErr != nil {
return retry.ExpectedErrorf("failed to get deployment: %q", getErr)
}
if len(deployment.Spec.Template.Spec.Containers) != 1 {

View File

@ -30,10 +30,10 @@ endpoint: '[fdae:41e4:649b:9303::1]:8091'`
// AssertMaintenanceTestConfigIsPresent asserts that the test configuration is present on a machine in maintenance mode.
func AssertMaintenanceTestConfigIsPresent(ctx context.Context, omniState state.State, cluster resource.ID, machineIndex int) TestFunc {
return func(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, time.Minute*5)
timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*5)
defer cancel()
machineStatusList, err := safe.StateListAll[*omni.MachineStatus](ctx, omniState, state.WithLabelQuery(resource.LabelEqual(omni.LabelCluster, cluster)))
machineStatusList, err := safe.StateListAll[*omni.MachineStatus](timeoutCtx, omniState, state.WithLabelQuery(resource.LabelEqual(omni.LabelCluster, cluster)))
require.NoError(t, err)
ids := make([]resource.ID, 0, machineStatusList.Len())
@ -46,7 +46,7 @@ func AssertMaintenanceTestConfigIsPresent(ctx context.Context, omniState state.S
machineID := ids[machineIndex]
rtestutils.AssertResource[*omni.RedactedClusterMachineConfig](ctx, t, omniState, machineID, func(r *omni.RedactedClusterMachineConfig, assertion *assert.Assertions) {
rtestutils.AssertResource[*omni.RedactedClusterMachineConfig](timeoutCtx, t, omniState, machineID, func(r *omni.RedactedClusterMachineConfig, assertion *assert.Assertions) {
buffer, bufferErr := r.TypedSpec().Value.GetUncompressedData()
assertion.NoError(bufferErr)

View File

@ -67,7 +67,7 @@ func AssertWorkerNodesRollingConfigUpdate(testCtx context.Context, cli *client.C
pair.MakePair(omni.LabelCluster, clusterName),
pair.MakePair(omni.LabelMachineSet, workersResourceID))
err = machineSetPatch.TypedSpec().Value.SetUncompressedData([]byte(fmt.Sprintf(`{"machine":{"env":{"%d":"test-val"}}}`, epochSeconds)))
err = machineSetPatch.TypedSpec().Value.SetUncompressedData(fmt.Appendf(nil, `{"machine":{"env":{"%d":"test-val"}}}`, epochSeconds))
require.NoError(t, err)
require.NoError(t, st.Create(ctx, machineSetPatch))
@ -150,13 +150,9 @@ func AssertWorkerNodesRollingScaleDown(testCtx context.Context, cli *client.Clie
t.Cleanup(wg.Wait)
machineSetNodeList.ForEach(func(node *omni.MachineSetNode) {
wg.Add(1)
go func() {
defer wg.Done()
wg.Go(func() {
rtestutils.Destroy[*omni.MachineSetNode](ctx, t, st, []string{node.Metadata().ID()})
}()
})
})
// expect the machine set to go into the ScalingDown phase

View File

@ -50,94 +50,47 @@ func testRotateCA(t *testing.T, options *TestOptions) {
omniState := options.omniClient.Omni().State()
t.Run("TalosCAShouldBeRotated", func(t *testing.T) {
rotateTalosCA := omni.NewRotateTalosCA(clusterName)
require.NoError(t, omniState.Create(ctx, rotateTalosCA))
// assert rotation started
_, err := safe.StateWatchFor[*omni.ClusterSecretsRotationStatus](ctx, omniState, omni.NewClusterSecretsRotationStatus(clusterName).Metadata(), func(cond *state.WatchForCondition) error {
cond.Condition = func(res resource.Resource) (bool, error) {
resTyped, ok := res.(*omni.ClusterSecretsRotationStatus)
if !ok {
return false, fmt.Errorf("unexpected resource type: %T", res)
}
if resTyped.TypedSpec().Value.Phase != specs.SecretRotationSpec_OK {
return true, nil
}
return false, nil
}
return nil
})
require.NoError(t, err)
// assert rotation completed
_, err = safe.StateWatchFor[*omni.ClusterSecretsRotationStatus](ctx, omniState, omni.NewClusterSecretsRotationStatus(clusterName).Metadata(), func(cond *state.WatchForCondition) error {
cond.Condition = func(res resource.Resource) (bool, error) {
resTyped, ok := res.(*omni.ClusterSecretsRotationStatus)
if !ok {
return false, fmt.Errorf("unexpected resource type: %T", res)
}
if resTyped.TypedSpec().Value.Phase == specs.SecretRotationSpec_OK {
return true, nil
}
return false, nil
}
return nil
})
require.NoError(t, err)
assertCARotated(ctx, t, omniState, clusterName, omni.NewRotateTalosCA(clusterName))
})
runTests(t, AssertBlockClusterAndTalosAPIAndKubernetesShouldBeReady(t.Context(), options, clusterName, options.MachineOptions.TalosVersion, options.MachineOptions.KubernetesVersion))
t.Run("KubernetesCAShouldBeRotated", func(t *testing.T) {
rotateKubernetesCA := omni.NewRotateKubernetesCA(clusterName)
require.NoError(t, omniState.Create(ctx, rotateKubernetesCA))
// assert rotation started
_, err := safe.StateWatchFor[*omni.ClusterSecretsRotationStatus](ctx, omniState, omni.NewClusterSecretsRotationStatus(clusterName).Metadata(), func(cond *state.WatchForCondition) error {
cond.Condition = func(res resource.Resource) (bool, error) {
resTyped, ok := res.(*omni.ClusterSecretsRotationStatus)
if !ok {
return false, fmt.Errorf("unexpected resource type: %T", res)
}
if resTyped.TypedSpec().Value.Phase != specs.SecretRotationSpec_OK {
return true, nil
}
return false, nil
}
return nil
})
require.NoError(t, err)
// assert rotation completed
_, err = safe.StateWatchFor[*omni.ClusterSecretsRotationStatus](ctx, omniState, omni.NewClusterSecretsRotationStatus(clusterName).Metadata(), func(cond *state.WatchForCondition) error {
cond.Condition = func(res resource.Resource) (bool, error) {
resTyped, ok := res.(*omni.ClusterSecretsRotationStatus)
if !ok {
return false, fmt.Errorf("unexpected resource type: %T", res)
}
if resTyped.TypedSpec().Value.Phase == specs.SecretRotationSpec_OK {
return true, nil
}
return false, nil
}
return nil
})
require.NoError(t, err)
assertCARotated(ctx, t, omniState, clusterName, omni.NewRotateKubernetesCA(clusterName))
})
runTests(t, AssertBlockClusterAndTalosAPIAndKubernetesShouldBeReady(t.Context(), options, clusterName, options.MachineOptions.TalosVersion, options.MachineOptions.KubernetesVersion))
t.Run("ClusterShouldBeDestroyed", AssertDestroyCluster(t.Context(), options.omniClient.Omni().State(), clusterName, false, false))
}
// assertCARotated creates the given CA rotation resource and verifies the full
// rotation lifecycle: the cluster secrets rotation status first leaves the OK
// phase (rotation has started) and then returns to OK (rotation has finished).
func assertCARotated(ctx context.Context, t *testing.T, omniState state.State, clusterName string, rotation resource.Resource) {
	require.NoError(t, omniState.Create(ctx, rotation))

	leftOK := func(phase specs.SecretRotationSpec_Phase) bool { return phase != specs.SecretRotationSpec_OK }
	backToOK := func(phase specs.SecretRotationSpec_Phase) bool { return phase == specs.SecretRotationSpec_OK }

	// rotation started: status moved out of the OK phase
	require.NoError(t, waitForRotationPhase(ctx, omniState, clusterName, leftOK))

	// rotation completed: status settled back into the OK phase
	require.NoError(t, waitForRotationPhase(ctx, omniState, clusterName, backToOK))
}
// waitForRotationPhase blocks until the cluster's ClusterSecretsRotationStatus
// resource reaches a phase accepted by matches, returning an error if the watch
// fails, the context is canceled, or an unexpected resource type is observed.
func waitForRotationPhase(ctx context.Context, omniState state.State, clusterName string, matches func(specs.SecretRotationSpec_Phase) bool) error {
	md := omni.NewClusterSecretsRotationStatus(clusterName).Metadata()

	// phaseMatches adapts the phase predicate to the watch condition contract.
	phaseMatches := func(res resource.Resource) (bool, error) {
		status, ok := res.(*omni.ClusterSecretsRotationStatus)
		if !ok {
			return false, fmt.Errorf("unexpected resource type: %T", res)
		}

		return matches(status.TypedSpec().Value.Phase), nil
	}

	if _, err := safe.StateWatchFor[*omni.ClusterSecretsRotationStatus](ctx, omniState, md, func(cond *state.WatchForCondition) error {
		cond.Condition = phaseMatches

		return nil
	}); err != nil {
		return err
	}

	return nil
}

View File

@ -1228,6 +1228,7 @@ Test authorization on accessing Omni API, some tests run without a cluster, some
)
clientFactory := newTestClientFactory(omniEndpoint, options.omniClient)
t.Cleanup(func() {
clientFactory.close() //nolint:errcheck
})

View File

@ -82,7 +82,7 @@ func clearConnectionRefused(ctx context.Context, t *testing.T, c *talosclient.Cl
return retry.ExpectedError(err)
}
// nolint:exhaustive
//nolint:exhaustive
switch status.Code(err) {
case codes.DeadlineExceeded,
codes.Unavailable,
@ -299,6 +299,7 @@ func AssertEtcdMembershipMatchesOmniResources(testCtx context.Context, options *
memberIDs := xslices.Map(m.Members, func(m *machine.EtcdMember) string { return etcd.FormatMemberID(m.Id) })
t.Logf("the count of members doesn't match the count of machines, expected %d, got: %d, members list: %s", len(clusterMachines), len(m.Members), memberIDs)
return
}
@ -307,6 +308,7 @@ func AssertEtcdMembershipMatchesOmniResources(testCtx context.Context, options *
if !assert.True(collect, ok) {
t.Logf("found etcd member which doesn't have associated machine status")
return
}
}

View File

@ -80,8 +80,6 @@ type TestOptions struct {
}
// Test tests the exposed services functionality in Omni.
//
//nolint:prealloc
func Test(ctx context.Context, t *testing.T, omniClient *client.Client, serviceAccountKey string, opts TestOptions, clusterIDs ...string) {
ctx, cancel := context.WithTimeout(ctx, 20*time.Minute)
t.Cleanup(cancel)
@ -105,8 +103,8 @@ func Test(ctx context.Context, t *testing.T, omniClient *client.Client, serviceA
}
var (
allServices []serviceContext //nolint:prealloc
allExposedServices []*omni.ExposedService //nolint:prealloc
allServices []serviceContext
allExposedServices []*omni.ExposedService
deploymentsToScaleDown []deploymentContext
)

View File

@ -6,10 +6,10 @@
// Package internal contains the command that generates the TS code.
package internal
//go:generate go run -tags=tools github.com/siderolabs/omni/internal/internal/tools/tsgen -out ../../frontend/src/api/resources.ts ../../,../../client/
//go:generate go run -tags=sidero.tools github.com/siderolabs/omni/internal/internal/tools/tsgen -out ../../frontend/src/api/resources.ts ../../,../../client/
// Generate JSON schema.
//go:generate go tool go-jsonschema --only-models --struct-name-from-title --tags=json,yaml --package=config --extra-imports -o=../pkg/config/types.generated.go ../pkg/config/schema.json
// Generate nil-safe accessors for the config fields.
//go:generate go run -tags=tools github.com/siderolabs/omni/internal/internal/tools/accessorgen --source=../pkg/config/types.generated.go --output=../pkg/config/accessors.generated.go
//go:generate go run -tags=sidero.tools github.com/siderolabs/omni/internal/internal/tools/accessorgen --source=../pkg/config/types.generated.go --output=../pkg/config/accessors.generated.go

View File

@ -3,7 +3,7 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//go:build tools
//go:build sidero.tools
package gen
@ -21,6 +21,7 @@ func Run(source string, output string) error {
if source == "" {
return fmt.Errorf("no source provided, use --source flag")
}
if output == "" {
return fmt.Errorf("no output provided, use --output flag")
}
@ -32,6 +33,7 @@ func Run(source string, output string) error {
return nil
}
//nolint:gocognit
func processFile(filename string, outputFilename string) error {
fset := token.NewFileSet()
@ -45,17 +47,19 @@ func processFile(filename string, outputFilename string) error {
packageName := node.Name.Name
buf.WriteString("// Code generated by go-accessor-gen. DO NOT EDIT.\n\n")
buf.WriteString(fmt.Sprintf("package %s\n\n", packageName))
fmt.Fprintf(&buf, "package %s\n\n", packageName)
if len(node.Imports) > 0 {
buf.WriteString("import (\n")
for _, imp := range node.Imports {
if imp.Name != nil {
buf.WriteString(fmt.Sprintf("\t%s %s\n", imp.Name.Name, imp.Path.Value))
fmt.Fprintf(&buf, "\t%s %s\n", imp.Name.Name, imp.Path.Value)
} else {
buf.WriteString(fmt.Sprintf("\t%s\n", imp.Path.Value))
fmt.Fprintf(&buf, "\t%s\n", imp.Path.Value)
}
}
buf.WriteString(")\n\n")
}
@ -99,18 +103,18 @@ func processFile(filename string, outputFilename string) error {
// Getter
// func (s *Struct) FieldName() Type
buf.WriteString(fmt.Sprintf("func (s *%s) Get%s() %s {\n", structName, fieldName, valueType))
buf.WriteString(fmt.Sprintf("\tif s == nil || s.%s == nil {\n", fieldName))
buf.WriteString(fmt.Sprintf("\t\treturn *new(%s)\n", valueType)) // Zero value
fmt.Fprintf(&buf, "func (s *%s) Get%s() %s {\n", structName, fieldName, valueType)
fmt.Fprintf(&buf, "\tif s == nil || s.%s == nil {\n", fieldName)
fmt.Fprintf(&buf, "\t\treturn *new(%s)\n", valueType) // Zero value
buf.WriteString("\t}\n")
buf.WriteString(fmt.Sprintf("\treturn *s.%s\n", fieldName))
fmt.Fprintf(&buf, "\treturn *s.%s\n", fieldName)
buf.WriteString("}\n\n")
// Setter
// func (s *Struct) SetFieldName(v Type)
buf.WriteString(fmt.Sprintf("func (s *%s) Set%s(v %s) {\n", structName, fieldName, valueType))
fmt.Fprintf(&buf, "func (s *%s) Set%s(v %s) {\n", structName, fieldName, valueType)
buf.WriteString(fmt.Sprintf("\ts.%s = &v\n", fieldName))
fmt.Fprintf(&buf, "\ts.%s = &v\n", fieldName)
buf.WriteString("}\n\n")
}
}

View File

@ -3,7 +3,7 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//go:build tools
//go:build sidero.tools
package gen_test
@ -36,6 +36,7 @@ type Complex struct {
}
`
dir := t.TempDir()
inputFile := filepath.Join(dir, "input.go")
if err := os.WriteFile(inputFile, []byte(inputContent), 0o644); err != nil {
t.Fatalf("failed to write input file: %v", err)
@ -52,6 +53,7 @@ type Complex struct {
if err != nil {
t.Fatalf("failed to read output file: %v", err)
}
output := string(content)
expectedChecks := []string{

View File

@ -3,7 +3,7 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//go:build tools
//go:build sidero.tools
package testdata

View File

@ -3,7 +3,7 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//go:build tools
//go:build sidero.tools
package main

View File

@ -3,7 +3,7 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//go:build tools
//go:build sidero.tools
package main
@ -62,12 +62,14 @@ func FindTSGenConstants(f file) ([]ConstantWithDirective, error) {
constants := FindConstants(f)
var result []ConstantWithDirective
for _, constant := range constants {
for _, comment := range constant.Doc {
if directive, ok := extractTsGenDirective(comment); ok {
if directive == "" {
return nil, fmt.Errorf("empty directive in %s", constant.Name)
}
result = append(result, ConstantWithDirective{
Constant: constant,
Directive: directive,
@ -97,26 +99,27 @@ func extractTsGenDirective(comment string) (string, bool) {
// Constant represents a constant with a doc, name and a value.
type Constant struct {
Doc []string
Name string
Value string
Doc []string
}
// ConstantWithDirective represents a Constant with a directive.
type ConstantWithDirective struct {
Constant
Directive string
Constant
}
// FindConstants returns a list of constants in the given file.
func FindConstants(file file) []Constant {
var result []Constant
ast.Inspect(file.f, func(n ast.Node) bool {
if decl, ok := n.(*ast.GenDecl); ok {
if decl.Tok == token.CONST {
if decl.Lparen == token.NoPos {
name := decl.Specs[0].(*ast.ValueSpec).Names[0]
obj := file.pkg.TypesInfo.Defs[name].(*types.Const)
name := decl.Specs[0].(*ast.ValueSpec).Names[0] //nolint:forcetypeassert,errcheck
obj := file.pkg.TypesInfo.Defs[name].(*types.Const) //nolint:forcetypeassert,errcheck
value := obj.Val().String()
result = append(result, Constant{
@ -137,7 +140,7 @@ func FindConstants(file file) []Constant {
}
name := valueSpec.Names[0]
obj := file.pkg.TypesInfo.Defs[name].(*types.Const)
obj := file.pkg.TypesInfo.Defs[name].(*types.Const) //nolint:forcetypeassert,errcheck
value := obj.Val().String()
result = append(result, Constant{
@ -151,6 +154,7 @@ func FindConstants(file file) []Constant {
return false
}
return true
})

View File

@ -3,7 +3,7 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//go:build tools
//go:build sidero.tools
package main
@ -49,6 +49,7 @@ func run(dirsToParse []string, tags string, out string) error {
if err != nil {
return err
}
result = append(result, filtered...)
}
@ -60,14 +61,16 @@ func run(dirsToParse []string, tags string, out string) error {
func createFoldersForFile(out string) error {
dir := filepath.Dir(out)
return os.MkdirAll(dir, os.ModePerm)
}
// getParams parses the command line parameters and returns the output file, the directory to parse and an error
// getParams parses the command line parameters and returns the output file, the directory to parse and an error.
func getParams() (out string, dirsToParse []string, tags string, _ error) {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s -out <output file> <input dir>\n", os.Args[0])
}
flag.StringVar(&out, "out", "", "output file")
flag.StringVar(&tags, "tags", "", "build tags")
flag.Parse()
@ -94,6 +97,7 @@ func getParams() (out string, dirsToParse []string, tags string, _ error) {
// fillTemplate fills the provided Template with the given data.
func fillTemplate(t *template.Template, data any) (string, error) {
var buf strings.Builder
err := t.Execute(&buf, data)
if err != nil {
return "", err
@ -115,6 +119,7 @@ var Tpl string
// SaveConstantsToFile saves the given constants to the given file.
func SaveConstantsToFile(file string, data any) error {
t := MakeTemplate("ts_gen", Tpl)
s, err := fillTemplate(t, data)
if err != nil {
return err
@ -133,7 +138,7 @@ func WriteFile(file string, s string) error {
if err != nil {
return err
}
defer f.Close()
defer f.Close() //nolint:errcheck
_, err = f.WriteString(s)
if err != nil {

View File

@ -3,7 +3,7 @@
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//go:build tools
//go:build sidero.tools
package main
@ -36,8 +36,7 @@ func Test_run(t *testing.T) {
err := run([]string{"./testdata/good/pkg/."}, "tools", "testdata/good/out/resources.ts")
require.NoError(t, err)
t.Cleanup(func() {
err := os.RemoveAll("testdata/good/out")
require.NoError(t, err)
require.NoError(t, os.RemoveAll("testdata/good/out"))
})
actual, err := os.ReadFile("testdata/good/out/resources.ts")

View File

@ -97,8 +97,6 @@ func WithVerifiedEmail() CheckOption {
// Check checks the given context for the given authentication and authorization conditions.
//
// The returned error can be checked against ErrUnauthenticated and ErrUnauthorized.
//
//nolint:gocyclo,cyclop
func Check(ctx context.Context, opt ...CheckOption) (CheckResult, error) {
authVal, ok := ctxstore.Value[EnabledAuthContextKey](ctx)
if !ok {

View File

@ -66,7 +66,7 @@ func TestActivity(t *testing.T) {
assert.Empty(t, list.Items)
})
for _, tc := range []struct { //nolint:dupl
for _, tc := range []struct {
ctxSetup func(ctx context.Context, identity string) context.Context
name string
identity string

View File

@ -263,8 +263,8 @@ func (p *Params) PopulateFallbacks() {
p.Services.DevServerProxy.SetKeyFile(p.Services.Api.GetKeyFile())
}
if p.Auth.Auth0.InitialUsers != nil && p.Auth.InitialUsers == nil { //nolint:staticcheck
p.Auth.InitialUsers = p.Auth.Auth0.InitialUsers //nolint:staticcheck
if p.Auth.Auth0.InitialUsers != nil && p.Auth.InitialUsers == nil {
p.Auth.InitialUsers = p.Auth.Auth0.InitialUsers
}
}

View File

@ -37,7 +37,7 @@ func RunServer(ctx context.Context, server *grpc.Server, lis net.Listener, eg *e
})
}
func serverGracefulStop(server *grpc.Server, ctx context.Context, logger *zap.Logger) { //nolint:revive
func serverGracefulStop(server *grpc.Server, ctx context.Context, logger *zap.Logger) {
<-ctx.Done()
stopped := make(chan struct{})

View File

@ -180,7 +180,7 @@ func collectLeafErrors(validationErr *jsonschema.ValidationError) []*jsonschema.
}
}
func (schema *Schema) formatLeafError(leaf *jsonschema.ValidationError) string { //nolint:cyclop
func (schema *Schema) formatLeafError(leaf *jsonschema.ValidationError) string {
switch ek := leaf.ErrorKind.(type) {
case *kind.Required:
return schema.formatRequiredError(leaf, ek)

View File

@ -286,7 +286,7 @@ func (s *Store) Reader(ctx context.Context, nLines int, follow bool) (logstore.L
}
}
conn, next, stop, err := s.readerRows(ctx, nLines) //nolint:rowserrcheck // false positive, we do not iterate the logs here
conn, next, stop, err := s.readerRows(ctx, nLines)
if err != nil {
s.unsubscribe(followCh)
close(closeCh)
@ -373,7 +373,7 @@ func (r *lineReader) fetchNextBatch(ctx context.Context) error {
var err error
r.next, r.stop, err = r.store.readerRowsAfter(r.conn, r.lastLogID) //nolint:rowserrcheck // false positive, we do not iterate the logs here
r.next, r.stop, err = r.store.readerRowsAfter(r.conn, r.lastLogID)
if err != nil {
// If the context was canceled during the query, return EOF
// to allow graceful shutdown of the reader loop.

View File

@ -313,7 +313,6 @@ func (suite *SiderolinkSuite) TestNodeWithSeveralAdvertisedIPs() {
ctx, cancel := context.WithTimeout(suite.ctx, time.Second*2)
defer cancel()
//nolint:staticcheck
rtestutils.AssertResources(ctx, suite.T(), suite.state, []string{
siderolink.DefaultJoinTokenID,
}, func(r *siderolink.DefaultJoinToken, assertion *assert.Assertions) {