docs: add COSI resource operations in API usage examples

Show how COSI resources should be created, updated and destroyed.

Closes siderolabs/omni#2804.

Signed-off-by: Utku Ozdemir <utku.ozdemir@siderolabs.com>
This commit is contained in:
Utku Ozdemir 2026-05-07 21:39:51 +02:00
parent 13c3f28978
commit 9f1bb2fa30
No known key found for this signature in database
GPG Key ID: DBD13117B0A14E93

View File

@@ -9,25 +9,20 @@ import (
"log"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/siderolabs/omni/client/pkg/client"
"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
"github.com/siderolabs/omni/client/pkg/template"
"github.com/siderolabs/omni/client/pkg/version"
)
//nolint:wsl,testableexamples
//nolint:wsl,testableexamples,gocognit,gocyclo,cyclop
func Example() {
// This example shows how to use Omni client to access resources.
// Setup versions information. You can embed that into `go build` too.
version.Name = "omni"
version.SHA = "build SHA"
version.Tag = "v0.9.1"
// For this example we will use Omni service account.
// You can create your service account in advance:
// For this example we will use an Omni service account.
// You can create one in advance:
//
// omnictl serviceaccount create example.account
// Created service account "example.account" with public key ID "<REDACTED>"
@@ -36,30 +31,29 @@ func Example() {
// OMNI_ENDPOINT=https://<account>.omni.siderolabs.io:443
// OMNI_SERVICE_ACCOUNT_KEY=base64encodedkey
//
// Note: Store the service account key securely, it will not be displayed again
// Note: Store the service account key securely, it will not be displayed again.
ctx := context.Background()
// Creating a new client.
client, err := client.New("https://<account>.omni.siderolabs.io:443", client.WithServiceAccount(
// Create a new client.
c, err := client.New("https://<account>.omni.siderolabs.io:443", client.WithServiceAccount(
"base64encodedkey", // From the generated service account.
))
if err != nil {
log.Panicf("failed to create omni client %s", err)
}
// Omni service is using COSI https://github.com/cosi-project/runtime/.
// The same client is used to get resources in Talos.
st := client.Omni().State()
defer func() {
if e := client.Close(); e != nil {
if e := c.Close(); e != nil {
log.Printf("failed to close client %s", e)
}
}()
// Getting the resources from the Omni state.
machines, err := safe.StateList[*omni.MachineStatus](ctx, st, omni.NewMachineStatus("").Metadata())
// Omni uses COSI (https://github.com/cosi-project/runtime) for resource management,
// the same runtime used in Talos.
st := c.Omni().State()
// List all machine statuses.
machines, err := safe.ReaderListAll[*omni.MachineStatus](ctx, st)
if err != nil {
log.Panicf("failed to get machines %s", err)
}
@@ -72,34 +66,25 @@ func Example() {
for item := range machines.All() {
log.Printf("machine %s, connected: %t", item.Metadata(), item.TypedSpec().Value.GetConnected())
// Check cluster assignment for a machine.
// Find a machine which is allocated into a cluster for the later use.
if c, ok := item.Metadata().Labels().Get(omni.LabelCluster); ok && machine == nil {
cluster = c
// Find a machine that is allocated to a cluster, for use in the Talos API example below.
if clusterName, ok := item.Metadata().Labels().Get(omni.LabelCluster); ok && machine == nil {
cluster = clusterName
machine = item
}
}
// Creating an empty cluster via template.
// Alternative is to use template.Load to load a cluster template.
template := template.WithCluster("example.cluster")
// Create an empty cluster via a template.
// Use template.Load to load a cluster template from YAML instead.
tmpl := template.WithCluster("example.cluster")
if _, err = template.Sync(ctx, st); err != nil {
if _, err = tmpl.Sync(ctx, st); err != nil {
log.Panicf("failed to sync cluster %s", err)
}
if _, err = template.Sync(ctx, st); err != nil {
log.Panicf("failed to sync cluster %s", err)
}
log.Printf("synced cluster")
log.Printf("sync cluster")
// Delete cluster.
if _, err = template.Delete(ctx, st); err != nil {
log.Panicf("failed to delete the cluster %s", err)
}
if _, err = template.Delete(ctx, st); err != nil {
// Delete the cluster.
if _, err = tmpl.Delete(ctx, st); err != nil {
log.Panicf("failed to delete the cluster %s", err)
}
@@ -112,12 +97,12 @@ func Example() {
return
}
// Using Talos through Omni.
// Use cluster and machine which we previously found.
cpuInfo, err := client.Talos().WithCluster(
// Use the Talos API through Omni.
// You can use machine UUID as Omni will properly resolve it into machine IP.
cpuInfo, err := c.Talos().WithCluster(
cluster,
).WithNodes(
machine.Metadata().ID(), // You can use machine UUID as Omni will properly resolve it into machine IP.
machine.Metadata().ID(),
).CPUInfo(ctx, &emptypb.Empty{})
if err != nil {
log.Panicf("failed to read machine CPU info %s", err)
@@ -133,9 +118,88 @@ func Example() {
}
}
// Talking to Omni specific APIs: getting talosconfig.
_, err = client.Management().Talosconfig(ctx)
// Get the talosconfig for the whole instance.
_, err = c.Management().Talosconfig(ctx)
if err != nil {
log.Panicf("failed to get talosconfig %s", err)
}
// Resource management via the COSI state API.
// The following example uses MachineClass as a representative user-managed resource to demonstrate
// watching for changes, as well as create, update, and destroy operations.
// The same patterns apply to any other user-managed resource type.
// Watch the MachineClass kind for changes.
// safe.StateWatchKind subscribes to all events for a resource type; safe.StateWatch watches a single
// resource by ID. The watch runs until its context is canceled, so the consumer is in full control
// of its lifetime.
watchCtx, watchCancel := context.WithCancel(ctx)
defer watchCancel()
eventCh := make(chan safe.WrappedStateEvent[*omni.MachineClass])
if err = safe.StateWatchKind(watchCtx, st, omni.NewMachineClass("").Metadata(), eventCh); err != nil {
log.Panicf("failed to watch machine classes %s", err)
}
go func() {
for {
select {
case <-watchCtx.Done():
return
case event := <-eventCh:
if event.Type() == state.Errored {
log.Printf("machine class watch error: %s", event.Error())
return
}
res, resErr := event.Resource()
if resErr != nil {
log.Printf("machine class watch: failed to decode resource %s", resErr)
continue
}
log.Printf("machine class event: %s %s", event.Type(), res.Metadata().ID())
}
}
}()
// MachineClass selects machines using label selector expressions.
// Each entry in MatchLabels is a comma-separated set of conditions (AND within one entry, OR across entries).
machineClass := omni.NewMachineClass("test")
machineClass.Metadata().Labels().Set("my-label", "my-value")
machineClass.TypedSpec().Value.MatchLabels = []string{
"omni.sidero.dev/arch = amd64, omni.sidero.dev/cores > 2", // amd64 machines with more than 2 cores
}
if err = st.Create(ctx, machineClass); err != nil {
log.Panicf("failed to create machine class %s", err)
}
// Update the machine class: add an OR condition to also match arm64 machines.
updated, err := safe.StateUpdateWithConflicts(ctx, st, machineClass.Metadata(), func(res *omni.MachineClass) error {
res.TypedSpec().Value.MatchLabels = append(res.TypedSpec().Value.MatchLabels, "omni.sidero.dev/arch = arm64")
return nil
})
if err != nil {
log.Panicf("failed to update machine class %s", err)
}
log.Printf("updated machine class labels: %v", updated.TypedSpec().Value.MatchLabels)
// For upsert (create-or-update) semantics, safe.StateModify or safe.StateModifyWithResult can be used instead.
// Destroy the machine class.
//
// The correct deletion sequence for COSI resources is: call Teardown, wait until all finalizers are
// cleared (controllers holding them will release upon seeing the teardown phase), then call Destroy.
// Calling only Teardown is not sufficient, and calling Destroy directly without Teardown first will fail
// if any finalizers are present. TeardownAndDestroy handles this full sequence.
if err = st.TeardownAndDestroy(ctx, machineClass.Metadata()); err != nil {
log.Panicf("failed to destroy machine class %s", err)
}
}