test: pick UKI and non-UKI machines correctly
The picker can retry if it couldn't pick enough machines, but we set the picked flags only once and never reset them. Change the approach to take this into account.

Signed-off-by: Utku Ozdemir <utku.ozdemir@siderolabs.com>
parent 19a6cd1219
commit 6ca43f371f
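For context, here is a minimal, self-contained sketch of the failure mode the commit message describes, using a simplified stand-in type rather than the actual Omni picker code:

// Hypothetical reproduction: the filter closure captures
// pickedUKI/pickedNonUKI, so a failed attempt leaves them set and a retry
// can no longer enforce the "at least one of each boot type" constraint.
package main

import "fmt"

type machine struct{ uki bool }

func main() {
	pickedUKI, pickedNonUKI := false, false

	filter := func(m machine) bool {
		if m.uki && !pickedUKI {
			pickedUKI = true

			return true
		}

		if !m.uki && !pickedNonUKI {
			pickedNonUKI = true

			return true
		}

		return pickedUKI && pickedNonUKI // both types covered: allow any
	}

	// Attempt 1 sees one machine of each type but fails overall
	// (e.g. too few machines in total); the flags stay true.
	filter(machine{uki: true})
	filter(machine{uki: false})

	// Attempt 2 (the retry): every machine is now accepted unconditionally,
	// because the stale flags claim both boot types are already covered.
	fmt.Println(filter(machine{uki: true}), filter(machine{uki: true})) // true true
}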
@@ -14,6 +14,7 @@ import (
 	"math/rand/v2"
 	"os"
 	"path/filepath"
+	"slices"
 	"sync"
 	"testing"
 	"time"
@@ -41,7 +42,7 @@ import (
 // BeforeClusterCreateFunc is a function that is called before a cluster is created.
 type BeforeClusterCreateFunc func(ctx context.Context, t *testing.T, cli *client.Client, machineIDs []resource.ID)

-type PickFilterFunc func(machineStatus *omni.MachineStatus) bool
+type PickFilterFunc func(machineStatus *omni.MachineStatus, alreadyPicked []*omni.MachineStatus) bool

 // ClusterOptions are the options for cluster creation.
 //
@@ -820,28 +821,40 @@ func pickUnallocatedMachines(ctx context.Context, t *testing.T, st state.State,
 	result := make([]resource.ID, 0, count)

 	if filterFunc == nil {
-		filterFunc = func(*omni.MachineStatus) bool { return true }
+		filterFunc = func(*omni.MachineStatus, []*omni.MachineStatus) bool { return true }
 	}

 	err := retry.Constant(time.Minute).RetryWithContext(ctx, func(ctx context.Context) error {
+		result = result[:0] // clear the result between retries
+
 		machineStatuses, err := safe.StateListAll[*omni.MachineStatus](ctx, st, state.WithLabelQuery(resource.LabelExists(omni.MachineStatusLabelAvailable)))
 		if err != nil {
 			return err
 		}

-		machineIDs := make([]resource.ID, 0, machineStatuses.Len())
-		for machineStatus := range machineStatuses.All() {
-			if filterFunc(machineStatus) {
-				machineIDs = append(machineIDs, machineStatus.Metadata().ID())
+		shuffledMachineStatuses := slices.Collect(machineStatuses.All())
+		rand.Shuffle(len(shuffledMachineStatuses), func(i, j int) {
+			shuffledMachineStatuses[i], shuffledMachineStatuses[j] = shuffledMachineStatuses[j], shuffledMachineStatuses[i]
+		})
+
+		pickedMachineStatuses := make([]*omni.MachineStatus, 0, count)
+
+		for _, machineStatus := range shuffledMachineStatuses {
+			if filterFunc(machineStatus, pickedMachineStatuses) {
+				pickedMachineStatuses = append(pickedMachineStatuses, machineStatus)
 			}
+
+			if len(pickedMachineStatuses) >= count {
+				break
+			}
 		}

-		if len(machineIDs) < count {
-			return retry.ExpectedErrorf("not enough machines: available %d, requested %d", len(machineIDs), count)
+		if len(pickedMachineStatuses) < count {
+			return retry.ExpectedErrorf("not enough machines: available %d, requested %d", len(pickedMachineStatuses), count)
 		}

-		for _, j := range rand.Perm(len(machineIDs))[:count] {
-			result = append(result, machineIDs[j])
+		for _, ms := range pickedMachineStatuses {
+			result = append(result, ms.Metadata().ID())
 		}

 		return nil
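To make the new flow easier to follow, here is a self-contained sketch of one retry attempt, with plain strings standing in for *omni.MachineStatus (the function and type names are illustrative, not from the Omni codebase). All picking state is rebuilt inside the attempt, so a retry starts from a clean slate:

package main

import (
	"fmt"
	"math/rand/v2"
)

// pick mirrors the structure of a single attempt: shuffle the candidates,
// then greedily keep those the filter accepts until count is reached.
func pick(available []string, count int, filter func(candidate string, alreadyPicked []string) bool) ([]string, bool) {
	shuffled := append([]string(nil), available...) // copy before shuffling

	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})

	picked := make([]string, 0, count)

	for _, candidate := range shuffled {
		if filter(candidate, picked) {
			picked = append(picked, candidate)
		}

		if len(picked) >= count {
			break
		}
	}

	return picked, len(picked) >= count // ok == false would trigger a retry
}

func main() {
	machines := []string{"m1", "m2", "m3", "m4"}

	picked, ok := pick(machines, 2, func(string, []string) bool { return true })
	fmt.Println(picked, ok) // two randomly chosen machines, true
}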
@@ -43,7 +43,7 @@ func testKernelArgsUpdate(t *testing.T, options *TestOptions) {
 		SkipExtensionCheckOnCreate: options.SkipExtensionsCheckOnCreate,

 		// Pick machines which are booted with UKI, as kernel args upgrades are only supported for them.
-		PickFilterFunc: func(ms *omni.MachineStatus) bool {
+		PickFilterFunc: func(ms *omni.MachineStatus, _ []*omni.MachineStatus) bool {
 			return ms.TypedSpec().Value.GetSecurityState().GetBootedWithUki()
 		},
 	}),
@@ -1400,8 +1400,6 @@ Test Omni upgrades, the first half that runs on the previous Omni version
 	omniClient := options.omniClient
 	clusterName := "integration-omni-upgrades"

-	pickedUKI, pickedNonUKI := false, false
-
 	t.Run("ClusterShouldBeCreated", CreateCluster(t.Context(), omniClient, ClusterOptions{
 		Name:          clusterName,
 		ControlPlanes: 3,
@@ -1420,22 +1418,27 @@ Test Omni upgrades, the first half that runs on the previous Omni version

 		// Pick at least one UKI and one non-UKI machine, so that we cover both flows during the upgrade tests.
 		// Schematic calculation differs between UKI and non-UKI machines, so it's important to test both.
-		PickFilterFunc: func(machineStatus *omni.MachineStatus) bool {
+		PickFilterFunc: func(machineStatus *omni.MachineStatus, alreadyPicked []*omni.MachineStatus) bool {
 			isUKI := machineStatus.TypedSpec().Value.SecurityState.GetBootedWithUki()
+			pickedUKI, pickedNonUKI := false, false
+
+			for _, ms := range alreadyPicked {
+				if ms.TypedSpec().Value.SecurityState.GetBootedWithUki() {
+					pickedUKI = true
+				} else {
+					pickedNonUKI = true
+				}
+			}

 			if isUKI && !pickedUKI { // if it's UKI, and we haven't picked one yet, pick it
-				pickedUKI = true
-
 				return true
 			}

 			if !isUKI && !pickedNonUKI { // if it's non-UKI, and we haven't picked one yet, pick it
-				pickedNonUKI = true
-
 				return true
 			}

-			return pickedUKI && pickedNonUKI // if we have already picked both types, allow any
+			return pickedUKI && pickedNonUKI // otherwise, pick only if we have both types already
 		},
 	}))
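A quick way to see why this version survives retries: the decision is now a pure function of (candidate, alreadyPicked), so no state can leak from a failed attempt into the next one. A hypothetical, simplified mirror of the logic above, with a plain bool in place of *omni.MachineStatus:

package main

import "fmt"

// wantBothTypes picks the first UKI and the first non-UKI machine it sees,
// then allows anything once both boot types are covered.
func wantBothTypes(isUKI bool, alreadyPicked []bool) bool {
	pickedUKI, pickedNonUKI := false, false

	for _, uki := range alreadyPicked {
		if uki {
			pickedUKI = true
		} else {
			pickedNonUKI = true
		}
	}

	if isUKI && !pickedUKI {
		return true
	}

	if !isUKI && !pickedNonUKI {
		return true
	}

	return pickedUKI && pickedNonUKI
}

func main() {
	fmt.Println(wantBothTypes(true, nil))                 // true: first UKI machine is taken
	fmt.Println(wantBothTypes(true, []bool{true}))        // false: a second UKI is rejected
	fmt.Println(wantBothTypes(true, []bool{true, false})) // true: both types covered, allow any
}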