fix: validate provisioner when destroying local clusters

Return an error when attempting to destroy a cluster that was created
with a different provisioner.

This fixes a bug where the qemu cluster state is removed without any actual
cleanup being done when running `talosctl cluster destroy` (provisioner defaults to docker).

* move the `vm.State` logic into `provision.State` as it's now reused by the docker provisioner as well.
* move "cluster create" command -> "cluster create dev"
* hide the "cluster create" command from docs
* fix omni api url validation
* fix machineconfig.yaml being unnecessarily written to disk on the qemu command

Signed-off-by: Orzelius <33936483+Orzelius@users.noreply.github.com>
This commit is contained in:
Orzelius 2025-10-22 21:31:19 +09:00
parent b494c54c81
commit 43b1d75375
No known key found for this signature in database
GPG Key ID: C17C8E3962A0D9B1
32 changed files with 383 additions and 410 deletions

View File

@ -159,8 +159,17 @@ var docsCmd = &cobra.Command{
// GenMarkdownReference is the same as GenMarkdownTree, but
// with custom filePrepender and linkHandler.
//
//nolint:gocyclo
func GenMarkdownReference(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
for _, c := range cmd.Commands() {
// Generate docs for children of the cluster create command although the command itself is hidden.
if cmd.Name() == "cluster" && c.Name() == "create" {
if err := GenMarkdownReference(c, w, linkHandler); err != nil {
return err
}
}
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
@ -170,6 +179,12 @@ func GenMarkdownReference(cmd *cobra.Command, w io.Writer, linkHandler func(stri
}
}
// Skip generating docs for the cluster create command itself and only generate docs for children.
// TODO: remove once "cluster create" is completely migrated to "cluster create dev".
if cmd.Name() == "create" && cmd.Parent() != nil && cmd.Parent().Name() == "cluster" {
return nil
}
return doc.GenMarkdownCustom(cmd, w, linkHandler)
}

View File

@ -15,8 +15,8 @@ import (
)
const (
// ProvisionerFlag is the flag with which the provisioner is configured.
ProvisionerFlag = "provisioner"
// ProvisionerFlagName is the flag with which the provisioner is configured.
ProvisionerFlagName = "provisioner"
)
// Cmd represents the cluster command.
@ -56,5 +56,5 @@ func init() {
// AddProvisionerFlag adds the provisioner flag to a command.
func AddProvisionerFlag(cmd *cobra.Command) {
cmd.Flags().StringVar(&provisionerName, ProvisionerFlag, providers.DockerProviderName, "Talos cluster provisioner to use")
cmd.Flags().StringVar(&provisionerName, ProvisionerFlagName, providers.DockerProviderName, "Talos cluster provisioner to use")
}

View File

@ -9,6 +9,7 @@ import (
"fmt"
"math/big"
"net/netip"
"net/url"
"os"
"slices"
"strconv"
@ -21,6 +22,7 @@ import (
"github.com/siderolabs/talos/cmd/talosctl/cmd/mgmt/cluster/create/clusterops"
"github.com/siderolabs/talos/cmd/talosctl/pkg/mgmt/helpers"
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime"
"github.com/siderolabs/talos/pkg/machinery/config"
"github.com/siderolabs/talos/pkg/machinery/config/bundle"
"github.com/siderolabs/talos/pkg/machinery/config/configpatcher"
@ -252,13 +254,23 @@ func (m *Maker[T]) GetClusterConfigs() (clusterops.ClusterConfigs, error) {
func (m *Maker[T]) applyOmniConfigs() error {
cfg := siderolink.NewConfigV1Alpha1()
parsedURL, err := ParseOmniAPIUrl(m.Ops.OmniAPIEndpoint)
parsedURL, err := url.Parse(m.Ops.OmniAPIEndpoint)
if err != nil {
return fmt.Errorf("error parsing omni api url: %w", err)
}
cfg.APIUrlConfig.URL = parsedURL
mode, err := runtime.ParseMode(runtime.ModeMetal.String())
if err != nil {
return err
}
_, err = cfg.Validate(mode)
if err != nil {
return err
}
ctr, err := container.New(cfg)
if err != nil {
return err

View File

@ -1,33 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package makers
import (
"fmt"
"net/url"
"strings"
)
// ParseOmniAPIUrl validates and parses the omni api url.
//
// The url must use the 'grpc://' or 'https://' scheme, carry a non-empty
// jointoken query parameter and specify an explicit port.
func ParseOmniAPIUrl(urlIn string) (*url.URL, error) {
	if !strings.HasPrefix(urlIn, "grpc://") && !strings.HasPrefix(urlIn, "https://") {
		return nil, fmt.Errorf("invalid url scheme: must be either 'grpc://' or 'https://'")
	}

	// Parse before inspecting the query so that jointoken is recognized
	// regardless of its position among the query parameters (a raw
	// strings.Contains check on "?jointoken=" would reject it when it is
	// not the first parameter).
	parsed, err := url.Parse(urlIn)
	if err != nil {
		return nil, err
	}

	if parsed.Query().Get("jointoken") == "" {
		return nil, fmt.Errorf("invalid url: must contain a jointoken query parameter")
	}

	if parsed.Port() == "" {
		return nil, fmt.Errorf("invalid url: must contain a port")
	}

	return parsed, nil
}

View File

@ -1,55 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package makers_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/siderolabs/talos/cmd/talosctl/cmd/mgmt/cluster/create/clusterops/configmaker/internal/makers"
)
// TestParseOmniAPIUrl exercises the omni api url validation: accepted
// schemes, the mandatory jointoken query parameter and the explicit port.
func TestParseOmniAPIUrl(t *testing.T) {
	for _, tt := range []struct {
		name    string
		input   string
		wantErr bool
		scheme  string
		host    string
		token   string
	}{
		{name: "valid grpc", input: "grpc://10.5.0.1:8090?jointoken=abc", scheme: "grpc", host: "10.5.0.1:8090", token: "abc"},
		{name: "valid https", input: "https://example.com:443?jointoken=token123", scheme: "https", host: "example.com:443", token: "token123"},
		{name: "invalid scheme", input: "http://10.5.0.1:8090?jointoken=abc", wantErr: true},
		{name: "missing jointoken", input: "grpc://10.5.0.1:8090", wantErr: true},
		{name: "missing port", input: "grpc://10.5.0.1?jointoken=abc", wantErr: true},
	} {
		t.Run(tt.name, func(t *testing.T) {
			u, err := makers.ParseOmniAPIUrl(tt.input)

			if tt.wantErr {
				assert.Error(t, err)
				assert.Nil(t, u)

				return
			}

			assert.NoError(t, err)

			if assert.NotNil(t, u) {
				assert.Equal(t, tt.scheme, u.Scheme)
				assert.Equal(t, tt.host, u.Host)
				assert.Equal(t, tt.token, u.Query().Get("jointoken"))
			}
		})
	}
}

View File

@ -29,10 +29,13 @@ type legacyOps struct {
extraDisksDrivers []string
}
var createCmd = getCreateCmd()
var (
createCmd = getCreateCmd("create", true)
createDevCmd = getCreateCmd("dev", false)
)
//nolint:gocyclo
func getCreateCmd() *cobra.Command {
func getCreateCmd(cmdName string, hidden bool) *cobra.Command {
const (
networkIPv4Flag = "ipv4"
networkIPv6Flag = "ipv6"
@ -256,12 +259,16 @@ func getCreateCmd() *cobra.Command {
// createCmd is the developer oriented create command.
createCmd := &cobra.Command{
Use: "create",
Hidden: false, // todo: hide once user-facing commands are implemented
Use: cmdName,
Hidden: hidden,
Short: "Creates a local qemu based cluster for Talos development",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.WithContext(context.Background(), func(ctx context.Context) error {
if cmdName == "create" {
cli.Warning("the developer oriented 'cluster create' command has been moved to 'cluster create dev'")
}
if err := validateQemuFlags(cmd.Flags(), unImplementedFlagsDarwin); err != nil {
return err
}
@ -299,7 +306,7 @@ func getCreateCmd() *cobra.Command {
createCmd.Flags().IntVar(&legacyOps.extraDiskSize, extraDiskSizeFlag, 5*1024, "default limit on disk size in MB (each VM)")
clustercmd.AddProvisionerFlag(createCmd)
cli.Should(createCmd.Flags().MarkHidden(clustercmd.ProvisionerFlag))
cli.Should(createCmd.Flags().MarkHidden(clustercmd.ProvisionerFlagName))
createCmd.Flags().AddFlagSet(getCommonFlags())
createCmd.Flags().AddFlagSet(getQemuFlags())
@ -313,6 +320,7 @@ func getCreateCmd() *cobra.Command {
}
func init() {
createCmd.AddCommand(createDevCmd)
clustercmd.Cmd.AddCommand(createCmd)
}

View File

@ -46,21 +46,19 @@ func init() {
return qemu
}
descriptionShort := "Create a local QEMU based Talos cluster"
descriptionLong := descriptionShort + "\n"
cmdDescription := "Create a local QEMU based Talos cluster\n"
descriptionLong += "Available presets:\n"
cmdDescription += "Available presets:\n"
for _, p := range preset.Presets {
descriptionLong += " - " + p.Name() + ": " + p.Description() + "\n"
cmdDescription += " - " + p.Name() + ": " + p.Description() + "\n"
}
descriptionLong += "\n"
descriptionLong += "Note: exactly one of 'iso', 'iso-secureboot', 'pxe' or 'disk-image' presets must be specified.\n"
cmdDescription += "\n"
cmdDescription += "Note: exactly one of 'iso', 'iso-secureboot', 'pxe' or 'disk-image' presets must be specified.\n"
createQemuCmd := &cobra.Command{
Use: providers.QemuProviderName,
Short: descriptionShort,
Long: descriptionLong,
Short: cmdDescription,
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.WithContext(context.Background(), func(ctx context.Context) error {

View File

@ -109,35 +109,41 @@ func preCreate(cOps clusterops.Common, clusterConfigs clusterops.ClusterConfigs)
// write machine config
if cOps.SkipInjectingConfig {
if clusterConfigs.ConfigBundle != nil {
types := []machine.Type{machine.TypeControlPlane, machine.TypeWorker}
if cOps.WithInitNode {
types = slices.Insert(types, 0, machine.TypeInit)
}
if err := clusterConfigs.ConfigBundle.Write(".", encoder.CommentsAll, types...); err != nil {
return err
}
}
// no configbundle, just write the machine config as-is
cfgBytes, err := clusterConfigs.ClusterRequest.Nodes[0].Config.Bytes()
if err != nil {
if err := writeMachineconfig(clusterConfigs, cOps); err != nil {
return err
}
fullFilePath := filepath.Join(".", "machineconfig.yaml")
if err = os.WriteFile(fullFilePath, cfgBytes, 0o644); err != nil {
return err
}
fmt.Fprintf(os.Stderr, "created %s\n", fullFilePath)
}
return nil
}
// writeMachineconfig persists the generated machine configuration to the
// current working directory: the full config bundle when one exists,
// otherwise the raw config of the first node as machineconfig.yaml.
func writeMachineconfig(clusterConfigs clusterops.ClusterConfigs, cOps clusterops.Common) error {
	if bundle := clusterConfigs.ConfigBundle; bundle != nil {
		machineTypes := []machine.Type{machine.TypeControlPlane, machine.TypeWorker}
		if cOps.WithInitNode {
			machineTypes = slices.Insert(machineTypes, 0, machine.TypeInit)
		}

		return bundle.Write(".", encoder.CommentsAll, machineTypes...)
	}

	// no configbundle, just write the machine config as-is
	cfgBytes, err := clusterConfigs.ClusterRequest.Nodes[0].Config.Bytes()
	if err != nil {
		return err
	}

	fullFilePath := filepath.Join(".", "machineconfig.yaml")
	if err := os.WriteFile(fullFilePath, cfgBytes, 0o644); err != nil {
		return err
	}

	fmt.Fprintf(os.Stderr, "created %s\n", fullFilePath)

	return nil
}
func checkLoopbackOmniURL(cOps clusterops.Common, clusterConfigs clusterops.ClusterConfigs) error {
parsedURL, err := url.Parse(cOps.OmniAPIEndpoint)
if err != nil {

View File

@ -6,6 +6,7 @@ package cluster
import (
"context"
"fmt"
"github.com/spf13/cobra"
@ -23,8 +24,7 @@ var destroyCmdFlags struct {
// destroyCmd represents the cluster destroy command.
var destroyCmd = &cobra.Command{
Use: "destroy",
Short: "Destroys a local docker-based or firecracker-based kubernetes cluster",
Long: ``,
Short: "Destroys a local Talos kubernetes cluster",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return cli.WithContext(context.Background(), destroy)
@ -32,7 +32,12 @@ var destroyCmd = &cobra.Command{
}
func destroy(ctx context.Context) error {
provisioner, err := providers.Factory(ctx, provisionerName)
state, err := provision.ReadState(ctx, PersistentFlags.ClusterName, PersistentFlags.StateDir)
if err != nil {
return fmt.Errorf("failed to read cluster state: %w", err)
}
provisioner, err := providers.Factory(ctx, state.ProvisionerName)
if err != nil {
return err
}
@ -58,6 +63,7 @@ func init() {
destroyCmd.PersistentFlags().StringVarP(&destroyCmdFlags.saveSupportArchivePath, "save-support-archive-path", "", "", "save support archive to the specified file on destroy")
destroyCmd.PersistentFlags().StringVarP(&destroyCmdFlags.saveClusterLogsArchivePath, "save-cluster-logs-archive-path", "", "", "save cluster logs archive to the specified file on destroy")
AddProvisionerFlag(destroyCmd)
cli.Should(destroyCmd.Flags().MarkDeprecated(ProvisionerFlagName, "the provisioner is inferred automatically"))
Cmd.AddCommand(destroyCmd)
}

View File

@ -7,7 +7,6 @@ package docker
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/siderolabs/talos/pkg/machinery/constants"
@ -30,8 +29,9 @@ func (p *provisioner) Create(ctx context.Context, request provision.ClusterReque
fmt.Fprintf(options.LogWriter, "creating state directory in %q\n", statePath)
if err := os.MkdirAll(statePath, 0o755); err != nil {
return nil, fmt.Errorf("unable to create state directory: %w", err)
state, err := provision.NewState(statePath, "docker", request.Name)
if err != nil {
return nil, fmt.Errorf("failed to initialize provisioner state: %w", err)
}
if err = p.ensureImageExists(ctx, request.Image, &options); err != nil {
@ -62,19 +62,24 @@ func (p *provisioner) Create(ctx context.Context, request provision.ClusterReque
nodeInfo = append(nodeInfo, workerNodeInfo...)
res := &result{
clusterInfo: provision.ClusterInfo{
ClusterName: request.Name,
Network: provision.NetworkInfo{
Name: request.Network.Name,
CIDRs: request.Network.CIDRs[:1],
GatewayAddrs: request.Network.GatewayAddrs[:1],
MTU: request.Network.MTU,
},
Nodes: nodeInfo,
KubernetesEndpoint: p.GetExternalKubernetesControlPlaneEndpoint(request.Network, constants.DefaultControlPlanePort),
state.ClusterInfo = provision.ClusterInfo{
ClusterName: request.Name,
Network: provision.NetworkInfo{
Name: request.Network.Name,
CIDRs: request.Network.CIDRs[:1],
GatewayAddrs: request.Network.GatewayAddrs[:1],
MTU: request.Network.MTU,
},
statePath: statePath,
Nodes: nodeInfo,
KubernetesEndpoint: p.GetExternalKubernetesControlPlaneEndpoint(request.Network, constants.DefaultControlPlanePort),
}
if err := state.Save(); err != nil {
return nil, err
}
res := &result{
clusterInfo: state.ClusterInfo,
statePath: statePath,
}
return res, nil

View File

@ -11,7 +11,6 @@ import (
"github.com/siderolabs/talos/pkg/machinery/constants"
"github.com/siderolabs/talos/pkg/provision"
"github.com/siderolabs/talos/pkg/provision/providers/vm"
)
// Create Talos cluster as a set of qemu VMs.
@ -39,7 +38,7 @@ func (p *provisioner) Create(ctx context.Context, request provision.ClusterReque
fmt.Fprintf(options.LogWriter, "creating state directory in %q\n", statePath)
state, err := vm.NewState(
state, err := provision.NewState(
statePath,
p.Name,
request.Name,

View File

@ -11,7 +11,6 @@ import (
cl "github.com/siderolabs/talos/pkg/cluster"
"github.com/siderolabs/talos/pkg/provision"
"github.com/siderolabs/talos/pkg/provision/providers/vm"
)
// Destroy Talos cluster as set of qemu VMs.
@ -60,7 +59,7 @@ func (p *provisioner) Destroy(ctx context.Context, cluster provision.Cluster, op
return err
}
state, ok := cluster.(*vm.State)
state, ok := cluster.(*provision.State)
if !ok {
return fmt.Errorf("error inspecting QEMU state, %#+v", cluster)
}

View File

@ -12,7 +12,6 @@ import (
"os/exec"
"github.com/siderolabs/talos/pkg/provision"
"github.com/siderolabs/talos/pkg/provision/providers/vm"
)
type networkConfig struct {
@ -21,7 +20,7 @@ type networkConfig struct {
EndAddr netip.Addr
}
func getLaunchNetworkConfig(state *vm.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest) networkConfig {
func getLaunchNetworkConfig(state *provision.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest) networkConfig {
// This ip will be assigned to the bridge
// The following ips will be assigned to the vms
startAddr := clusterReq.Nodes[0].IPs[0].Prev()

View File

@ -29,7 +29,6 @@ import (
"github.com/siderolabs/talos/pkg/provision"
"github.com/siderolabs/talos/pkg/provision/internal/cniutils"
"github.com/siderolabs/talos/pkg/provision/providers/vm"
)
type networkConfig struct {
@ -47,7 +46,7 @@ type networkConfig struct {
ns ns.NetNS
}
func getLaunchNetworkConfig(state *vm.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest) networkConfig {
func getLaunchNetworkConfig(state *provision.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest) networkConfig {
return networkConfig{
networkConfigBase: getLaunchNetworkConfigBase(state, clusterReq, nodeReq),
CniNetworkConfig: state.VMCNIConfig,

View File

@ -29,11 +29,10 @@ import (
"github.com/siderolabs/talos/pkg/machinery/constants"
"github.com/siderolabs/talos/pkg/machinery/kernel"
"github.com/siderolabs/talos/pkg/provision"
"github.com/siderolabs/talos/pkg/provision/providers/vm"
)
//nolint:gocyclo,cyclop
func (p *provisioner) createNode(ctx context.Context, state *vm.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest, opts *provision.Options) (provision.NodeInfo, error) {
func (p *provisioner) createNode(ctx context.Context, state *provision.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest, opts *provision.Options) (provision.NodeInfo, error) {
arch := Arch(opts.TargetArch)
pidPath := state.GetRelativePath(fmt.Sprintf("%s.pid", nodeReq.Name))
@ -270,7 +269,13 @@ func (p *provisioner) createNode(ctx context.Context, state *vm.State, clusterRe
return nodeInfo, nil
}
func (p *provisioner) createNodes(ctx context.Context, state *vm.State, clusterReq provision.ClusterRequest, nodeReqs []provision.NodeRequest, opts *provision.Options) ([]provision.NodeInfo, error) {
func (p *provisioner) createNodes(
ctx context.Context,
state *provision.State,
clusterReq provision.ClusterRequest,
nodeReqs []provision.NodeRequest,
opts *provision.Options,
) ([]provision.NodeInfo, error) {
errCh := make(chan error)
nodeCh := make(chan provision.NodeInfo, len(nodeReqs))
@ -345,7 +350,7 @@ func (p *provisioner) handleOptionalZSTDDiskImage(provisionerDisk, diskImagePath
return err
}
func (p *provisioner) createMetalConfigISO(state *vm.State, nodeName, config string) (string, error) {
func (p *provisioner) createMetalConfigISO(state *provision.State, nodeName, config string) (string, error) {
isoPath := state.GetRelativePath(nodeName + "-metal-config.iso")
tmpDir, err := os.MkdirTemp("", "talos-metal-config-iso")
@ -367,7 +372,7 @@ func (p *provisioner) createMetalConfigISO(state *vm.State, nodeName, config str
return isoPath, nil
}
func getLaunchNetworkConfigBase(state *vm.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest) networkConfigBase {
func getLaunchNetworkConfigBase(state *provision.State, clusterReq provision.ClusterRequest, nodeReq provision.NodeRequest) networkConfigBase {
return networkConfigBase{
BridgeName: state.BridgeName,
CIDRs: clusterReq.Network.CIDRs,

View File

@ -11,11 +11,11 @@ import (
"io/fs"
"os"
"github.com/siderolabs/talos/pkg/provision/providers/vm"
"github.com/siderolabs/talos/pkg/provision"
)
//nolint:gocyclo
func (p *provisioner) createPFlashImages(state *vm.State, nodeName string, pflashSpec []PFlash) ([]string, error) {
func (p *provisioner) createPFlashImages(state *provision.State, nodeName string, pflashSpec []PFlash) ([]string, error) {
var images []string
for i, pflash := range pflashSpec {

View File

@ -15,7 +15,7 @@ import (
"github.com/siderolabs/talos/pkg/provision/providers/vm"
)
func (p *provisioner) createVirtualTPMState(state *vm.State, nodeName string, tpm2Enabled bool) (tpmConfig, error) {
func (p *provisioner) createVirtualTPMState(state *provision.State, nodeName string, tpm2Enabled bool) (tpmConfig, error) {
tpmStateDir := state.GetRelativePath(fmt.Sprintf("%s-tpm", nodeName))
if err := os.MkdirAll(tpmStateDir, 0o755); err != nil {

View File

@ -276,7 +276,7 @@ const (
)
// startDHCPd starts the DHCPd server.
func (p *Provisioner) startDHCPd(state *State, clusterReq provision.ClusterRequest) error {
func (p *Provisioner) startDHCPd(state *provision.State, clusterReq provision.ClusterRequest) error {
pidPath := state.GetRelativePath(dhcpPid)
logFile, err := os.OpenFile(state.GetRelativePath(dhcpLog), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o666)
@ -320,7 +320,7 @@ func (p *Provisioner) startDHCPd(state *State, clusterReq provision.ClusterReque
}
// DestroyDHCPd destroys the DHCP server.
func (p *Provisioner) DestroyDHCPd(state *State) error {
func (p *Provisioner) DestroyDHCPd(state *provision.State) error {
pidPath := state.GetRelativePath(dhcpPid)
return StopProcessByPidfile(pidPath)

View File

@ -22,7 +22,7 @@ import (
// It waits for the interface to appear, shuts down the apple bootp DHCPd server created by qemu by default,
// starts the talos DHCP server and then starts the apple bootp server again, which is configured such
// that it detects existing dhcp servers on interfaces and doesn't interfere with them.
func (p *Provisioner) CreateDHCPd(ctx context.Context, state *State, clusterReq provision.ClusterRequest) error {
func (p *Provisioner) CreateDHCPd(ctx context.Context, state *provision.State, clusterReq provision.ClusterRequest) error {
err := waitForInterface(ctx, state.BridgeName)
if err != nil {
return err
@ -75,7 +75,7 @@ func waitForInterface(ctx context.Context, interfaceName string) error {
})
}
func waitForDHCPServerUp(ctx context.Context, state *State) error {
func waitForDHCPServerUp(ctx context.Context, state *provision.State) error {
return retry.Constant(1*time.Minute, retry.WithUnits(100*time.Millisecond)).RetryWithContext(ctx, func(_ context.Context) error {
logFileData, err := os.ReadFile(state.GetRelativePath(dhcpLog))
if err != nil {

View File

@ -11,6 +11,6 @@ import (
)
// CreateDHCPd creates a DHCP server.
func (p *Provisioner) CreateDHCPd(ctx context.Context, state *State, clusterReq provision.ClusterRequest) error {
func (p *Provisioner) CreateDHCPd(ctx context.Context, state *provision.State, clusterReq provision.ClusterRequest) error {
return p.startDHCPd(state, clusterReq)
}

View File

@ -24,7 +24,7 @@ func (p *Provisioner) UserDiskName(index int) string {
}
// CreateDisks creates empty disk files for each disk.
func (p *Provisioner) CreateDisks(state *State, nodeReq provision.NodeRequest) (diskPaths []string, err error) {
func (p *Provisioner) CreateDisks(state *provision.State, nodeReq provision.NodeRequest) (diskPaths []string, err error) {
const QEMUAlignment = 4 * 1024 * 1024 // 4 MiB, required by QEMU
diskPaths = make([]string, len(nodeReq.Disks))

View File

@ -22,7 +22,7 @@ const (
)
// CreateJSONLogs creates JSON logs server.
func (p *Provisioner) CreateJSONLogs(state *State, clusterReq provision.ClusterRequest, options provision.Options) error {
func (p *Provisioner) CreateJSONLogs(state *provision.State, clusterReq provision.ClusterRequest, options provision.Options) error {
pidPath := state.GetRelativePath(jsonLogsPid)
logFile, err := os.OpenFile(state.GetRelativePath(jsonLogsLog), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o666)
@ -61,7 +61,7 @@ func (p *Provisioner) CreateJSONLogs(state *State, clusterReq provision.ClusterR
}
// DestroyJSONLogs destroys JSON logs server.
func (p *Provisioner) DestroyJSONLogs(state *State) error {
func (p *Provisioner) DestroyJSONLogs(state *provision.State) error {
pidPath := state.GetRelativePath(jsonLogsPid)
return StopProcessByPidfile(pidPath)

View File

@ -23,7 +23,7 @@ const (
)
// CreateKMS creates KMS server.
func (p *Provisioner) CreateKMS(state *State, clusterReq provision.ClusterRequest, options provision.Options) error {
func (p *Provisioner) CreateKMS(state *provision.State, clusterReq provision.ClusterRequest, options provision.Options) error {
pidPath := state.GetRelativePath(kmsPid)
logFile, err := os.OpenFile(state.GetRelativePath(kmsLog), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o666)
@ -63,7 +63,7 @@ func (p *Provisioner) CreateKMS(state *State, clusterReq provision.ClusterReques
}
// DestroyKMS destroys KMS server.
func (p *Provisioner) DestroyKMS(state *State) error {
func (p *Provisioner) DestroyKMS(state *provision.State) error {
pidPath := state.GetRelativePath(kmsPid)
return StopProcessByPidfile(pidPath)

View File

@ -23,7 +23,7 @@ const (
)
// CreateLoadBalancer creates load balancer.
func (p *Provisioner) CreateLoadBalancer(state *State, clusterReq provision.ClusterRequest) error {
func (p *Provisioner) CreateLoadBalancer(state *provision.State, clusterReq provision.ClusterRequest) error {
pidPath := state.GetRelativePath(lbPid)
logFile, err := os.OpenFile(state.GetRelativePath(lbLog), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o666)
@ -65,7 +65,7 @@ func (p *Provisioner) CreateLoadBalancer(state *State, clusterReq provision.Clus
}
// DestroyLoadBalancer destroys load balancer.
func (p *Provisioner) DestroyLoadBalancer(state *State) error {
func (p *Provisioner) DestroyLoadBalancer(state *provision.State) error {
pidPath := state.GetRelativePath(lbPid)
return StopProcessByPidfile(pidPath)

View File

@ -15,7 +15,7 @@ import (
// CreateNetwork on darwin assigns the bridge name to the to-be created interface name.
// The interface itself is later created by qemu, but the name needs to be known so that the dhcp server can be linked to the interface.
func (p *Provisioner) CreateNetwork(ctx context.Context, state *State, network provision.NetworkRequest, options provision.Options) error {
func (p *Provisioner) CreateNetwork(ctx context.Context, state *provision.State, network provision.NetworkRequest, options provision.Options) error {
ifaces, err := net.Interfaces()
if err != nil {
return err
@ -34,6 +34,6 @@ func (p *Provisioner) CreateNetwork(ctx context.Context, state *State, network p
}
// DestroyNetwork does nothing on darwin as the network is automatically cleaned up by qemu when the final machine of a cidr block is killed.
func (p *Provisioner) DestroyNetwork(state *State) error {
func (p *Provisioner) DestroyNetwork(state *provision.State) error {
return nil
}

View File

@ -39,7 +39,7 @@ import (
// different bridge interfaces.
//
//nolint:gocyclo
func (p *Provisioner) CreateNetwork(ctx context.Context, state *State, network provision.NetworkRequest, options provision.Options) error {
func (p *Provisioner) CreateNetwork(ctx context.Context, state *provision.State, network provision.NetworkRequest, options provision.Options) error {
networkNameHash := sha256.Sum256([]byte(network.Name))
state.BridgeName = fmt.Sprintf("%s%s", "talos", hex.EncodeToString(networkNameHash[:])[:8])
@ -237,7 +237,7 @@ func getTicksInUsec() (float64, error) {
}
//nolint:gocyclo
func (p *Provisioner) configureNetworkChaos(network provision.NetworkRequest, state *State, options provision.Options) error {
func (p *Provisioner) configureNetworkChaos(network provision.NetworkRequest, state *provision.State, options provision.Options) error {
if (network.Bandwidth != 0) && (network.Latency != 0 || network.Jitter != 0 || network.PacketLoss != 0 || network.PacketReorder != 0 || network.PacketCorrupt != 0) {
return errors.New("bandwidth and other chaos options cannot be used together")
}
@ -344,7 +344,7 @@ func (p *Provisioner) configureNetworkChaos(network provision.NetworkRequest, st
}
// DestroyNetwork destroy bridge interface by name to clean up.
func (p *Provisioner) DestroyNetwork(state *State) error {
func (p *Provisioner) DestroyNetwork(state *provision.State) error {
iface, err := net.InterfaceByName(state.BridgeName)
if err != nil {
return fmt.Errorf("error looking up bridge interface %q: %w", state.BridgeName, err)

View File

@ -6,52 +6,21 @@ package vm
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
yaml "gopkg.in/yaml.v3"
"github.com/siderolabs/talos/pkg/provision"
)
// Reflect decodes the state file.
func (p *Provisioner) Reflect(ctx context.Context, clusterName, stateDirectory string) (provision.Cluster, error) {
statePath := filepath.Join(stateDirectory, clusterName)
st, err := os.Stat(statePath)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("cluster %q not found: %w", clusterName, err)
}
return nil, err
}
if !st.IsDir() {
return nil, fmt.Errorf("state path %q is not a directory: %s", statePath, st.Mode())
}
stateFile, err := os.Open(filepath.Join(statePath, stateFileName))
state, err := provision.ReadState(ctx, clusterName, stateDirectory)
if err != nil {
return nil, err
}
defer stateFile.Close() //nolint:errcheck
state := &State{}
if err = yaml.NewDecoder(stateFile).Decode(state); err != nil {
return nil, fmt.Errorf("error unmarshalling state file: %w", err)
}
if state.ProvisionerName != p.Name {
return nil, fmt.Errorf("cluster %q was created with different provisioner %q", clusterName, state.ProvisionerName)
}
state.statePath = statePath
return state, nil
}

View File

@ -23,7 +23,7 @@ const (
)
// CreateSiderolinkAgent creates the siderolink agent.
func (p *Provisioner) CreateSiderolinkAgent(state *State, clusterReq provision.ClusterRequest) error {
func (p *Provisioner) CreateSiderolinkAgent(state *provision.State, clusterReq provision.ClusterRequest) error {
pidPath := state.GetRelativePath(siderolinkAgentPid)
logFile, err := os.OpenFile(state.GetRelativePath(siderolinkAgentLog), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o666)
@ -80,7 +80,7 @@ func (p *Provisioner) CreateSiderolinkAgent(state *State, clusterReq provision.C
}
// DestroySiderolinkAgent destroys siderolink agent.
func (p *Provisioner) DestroySiderolinkAgent(state *State) error {
func (p *Provisioner) DestroySiderolinkAgent(state *provision.State) error {
pidPath := state.GetRelativePath(siderolinkAgentPid)
if _, err := os.Stat(pidPath); errors.Is(err, os.ErrNotExist) {

View File

@ -2,97 +2,13 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//nolint:revive
package vm
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
import "github.com/siderolabs/talos/pkg/provision"
"github.com/containernetworking/cni/libcni"
yaml "gopkg.in/yaml.v3"
// Deprecated: Use provision.State instead.
type State = provision.State
"github.com/siderolabs/talos/pkg/provision"
)
// State common state representation for vm provisioners.
type State struct {
ProvisionerName string
BridgeName string
ClusterInfo provision.ClusterInfo
VMCNIConfig *libcni.NetworkConfigList
statePath string
}
// NewState create new vm provisioner state.
func NewState(statePath, provisionerName, clusterName string) (*State, error) {
s := &State{
ProvisionerName: provisionerName,
statePath: statePath,
}
_, err := os.Stat(s.statePath)
if err == nil {
return nil, fmt.Errorf(
"state directory %q already exists, is the cluster %q already running? remove cluster state with talosctl cluster destroy",
s.statePath,
clusterName,
)
}
if !errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("error checking state directory: %w", err)
}
if err = os.MkdirAll(s.statePath, os.ModePerm); err != nil {
return nil, fmt.Errorf("error creating state directory: %w", err)
}
return s, nil
}
// Provisioner get provisioner name.
func (s *State) Provisioner() string {
return s.ProvisionerName
}
// Info get cluster info.
func (s *State) Info() provision.ClusterInfo {
return s.ClusterInfo
}
// StatePath get state config file path.
func (s *State) StatePath() (string, error) {
if s.statePath == "" {
return "", errors.New("state path is not set")
}
return s.statePath, nil
}
// Save save state to config file.
func (s *State) Save() error {
// save state
stateFile, err := os.Create(filepath.Join(s.statePath, stateFileName))
if err != nil {
return err
}
defer stateFile.Close() //nolint:errcheck
if err = yaml.NewEncoder(stateFile).Encode(&s); err != nil {
return fmt.Errorf("error marshaling state: %w", err)
}
return stateFile.Close()
}
// GetRelativePath get file path relative to config folder.
func (s *State) GetRelativePath(path string) string {
return filepath.Join(s.statePath, path)
}
// Deprecated: Use provision.NewState instead.
var NewState = provision.NewState

View File

@ -5,8 +5,6 @@
// Package vm implements common methods for VM provisioners.
package vm
const stateFileName = "state.yaml"
// Provisioner base for VM provisioners.
type Provisioner struct {
// Name actual provisioner type.

135
pkg/provision/state.go Normal file
View File

@ -0,0 +1,135 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package provision
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"github.com/containernetworking/cni/libcni"
yaml "gopkg.in/yaml.v3"
)
// StateFileName is the name of the yaml state file.
const StateFileName = "state.yaml"

// State common state representation for vm provisioners.
type State struct {
	// ProvisionerName records which provisioner created the cluster.
	ProvisionerName string
	// BridgeName is a network bridge name — presumably set by VM provisioners; verify against the vm package.
	BridgeName string
	// ClusterInfo holds the provisioned cluster's details.
	ClusterInfo ClusterInfo
	// VMCNIConfig is the CNI network configuration list used for VM networking.
	VMCNIConfig *libcni.NetworkConfigList
	// statePath is the on-disk directory holding this cluster's state;
	// unexported, so it is not serialized to the state file and is
	// restored explicitly by ReadState.
	statePath string
}
// NewState creates a new provisioner state rooted at statePath.
//
// The state directory must not yet exist — an existing directory is
// treated as a sign the cluster is already running — and is created
// before the state is returned.
func NewState(statePath, provisionerName, clusterName string) (*State, error) {
	if _, err := os.Stat(statePath); err == nil {
		return nil, fmt.Errorf(
			"state directory %q already exists, is the cluster %q already running? remove cluster state with talosctl cluster destroy",
			statePath,
			clusterName,
		)
	} else if !errors.Is(err, fs.ErrNotExist) {
		return nil, fmt.Errorf("error checking state directory: %w", err)
	}

	if err := os.MkdirAll(statePath, os.ModePerm); err != nil {
		return nil, fmt.Errorf("error creating state directory: %w", err)
	}

	return &State{
		ProvisionerName: provisionerName,
		statePath:       statePath,
	}, nil
}
// ReadState loads and parses a previously saved cluster state from
// <stateDirectory>/<clusterName>/<StateFileName>.
//
// NOTE(review): ctx is currently unused; kept for API stability — confirm before removing.
func ReadState(ctx context.Context, clusterName, stateDirectory string) (*State, error) {
	statePath := filepath.Join(stateDirectory, clusterName)

	info, err := os.Stat(statePath)

	switch {
	case errors.Is(err, fs.ErrNotExist):
		return nil, fmt.Errorf("cluster %q not found: %w", clusterName, err)
	case err != nil:
		return nil, err
	case !info.IsDir():
		return nil, fmt.Errorf("state path %q is not a directory: %s", statePath, info.Mode())
	}

	f, err := os.Open(filepath.Join(statePath, StateFileName))
	if err != nil {
		return nil, err
	}

	defer f.Close() //nolint:errcheck

	var state State

	if err := yaml.NewDecoder(f).Decode(&state); err != nil {
		return nil, fmt.Errorf("error unmarshalling state file: %w", err)
	}

	// statePath is unexported and not serialized — restore it from the lookup path.
	state.statePath = statePath

	return &state, nil
}
// Provisioner returns the name of the provisioner that created this cluster state.
func (s *State) Provisioner() string {
	return s.ProvisionerName
}
// Info returns the cluster information recorded in the state.
func (s *State) Info() ClusterInfo {
	return s.ClusterInfo
}
// StatePath returns the directory the cluster state is stored in, or
// an error if no state path has been configured.
func (s *State) StatePath() (string, error) {
	if len(s.statePath) == 0 {
		return "", errors.New("state path is not set")
	}

	return s.statePath, nil
}
// Save serializes the state as YAML into the state file inside the
// cluster state directory, overwriting any previous content.
func (s *State) Save() error {
	stateFile, err := os.Create(filepath.Join(s.statePath, StateFileName))
	if err != nil {
		// Wrap for context, consistent with the other errors in this file.
		return fmt.Errorf("error creating state file: %w", err)
	}

	defer stateFile.Close() //nolint:errcheck

	enc := yaml.NewEncoder(stateFile)

	if err = enc.Encode(s); err != nil {
		return fmt.Errorf("error marshaling state: %w", err)
	}

	// Close the encoder to flush buffered output and terminate the YAML
	// stream; its error was previously silently dropped.
	if err = enc.Close(); err != nil {
		return fmt.Errorf("error finalizing state file: %w", err)
	}

	return stateFile.Close()
}
// GetRelativePath returns the given path joined onto the cluster state
// directory, i.e. the absolute location of a file kept inside the state folder.
func (s *State) GetRelativePath(path string) string {
	return filepath.Join(s.statePath, path)
}

View File

@ -118,108 +118,12 @@ talosctl cgroups [flags]
* [talosctl](#talosctl) - A CLI for out-of-band management of Kubernetes nodes created by Talos
## talosctl cluster create docker
Create a local Docker based kubernetes cluster
```
talosctl cluster create docker [flags]
```
### Options
```
--config-patch stringArray patch generated machineconfigs (applied to all node types), use @file to read a patch from file
--config-patch-controlplanes stringArray patch generated machineconfigs (applied to 'controlplane' type)
--config-patch-workers stringArray patch generated machineconfigs (applied to 'worker' type)
--cpus-controlplanes string the share of CPUs as fraction for each control plane/VM (default "2.0")
--cpus-workers string the share of CPUs as fraction for each worker/VM (default "2.0")
-p, --exposed-ports string comma-separated list of ports/protocols to expose on init node. Ex -p <hostPort>:<containerPort>/<protocol (tcp or udp)>
-h, --help help for docker
--host-ip string Host IP to forward exposed ports to (default "0.0.0.0")
--image string the talos image to run (default "ghcr.io/siderolabs/talos:latest")
--kubernetes-version string desired kubernetes version to run (default "1.34.1")
--memory-controlplanes string(mb,gb) the limit on memory usage for each control plane/VM (default 2.0GiB)
--memory-workers string(mb,gb) the limit on memory usage for each worker/VM (default 2.0GiB)
--mount mount attach a mount to the container (docker --mount syntax)
--subnet string Docker network subnet CIDR (default "10.5.0.0/24")
--talosconfig-destination string The location to save the generated Talos configuration file to. Defaults to 'TALOSCONFIG' env variable if set, otherwise '$HOME/.talos/config' and '/var/run/secrets/talos.dev/config' in order.
--workers int the number of workers to create (default 1)
```
### Options inherited from parent commands
```
--name string the name of the cluster (default "talos-default")
--state string directory path to store cluster state (default "/home/user/.talos/clusters")
```
### SEE ALSO
* [talosctl cluster create](#talosctl-cluster-create) - Creates a local qemu based cluster for Talos development
## talosctl cluster create qemu
Create a local QEMU based Talos cluster
### Synopsis
Create a local QEMU based Talos cluster
Available presets:
- iso: Configure Talos to boot from an ISO from the Image Factory.
- iso-secureboot: Configure Talos for Secureboot via ISO. Only available on Linux hosts.
- pxe: Configure Talos to boot via PXE from the Image Factory.
- disk-image: Configure Talos to boot from a disk image from the Image Factory.
- maintenance: Skip applying machine configuration and leave the machines in maintenance mode. The machine configuration files are written to the working directory.
Note: exactly one of 'iso', 'iso-secureboot', 'pxe' or 'disk-image' presets must be specified.
```
talosctl cluster create qemu [flags]
```
### Options
```
--cidr string CIDR of the cluster network (default "10.5.0.0/24")
--config-patch stringArray patch generated machineconfigs (applied to all node types), use @file to read a patch from file
--config-patch-controlplanes stringArray patch generated machineconfigs (applied to 'controlplane' type)
--config-patch-workers stringArray patch generated machineconfigs (applied to 'worker' type)
--controlplanes int the number of controlplanes to create (default 1)
--cpus-controlplanes string the share of CPUs as fraction for each control plane/VM (default "2.0")
--cpus-workers string the share of CPUs as fraction for each worker/VM (default "2.0")
--disks disks list of disks to create in format "<driver1>:<size1>" (disks after the first one are added only to worker machines) (default virtio:10GiB,virtio:6GiB)
-h, --help help for qemu
--image-factory-url string image factory url (default "https://factory.talos.dev/")
--kubernetes-version string desired kubernetes version to run (default "1.34.1")
--memory-controlplanes string(mb,gb) the limit on memory usage for each control plane/VM (default 2.0GiB)
--memory-workers string(mb,gb) the limit on memory usage for each worker/VM (default 2.0GiB)
--omni-api-endpoint string the Omni API endpoint (must include a scheme, a port and a join token)
--presets strings list of presets to apply (default [iso])
--schematic-id string image factory schematic id (defaults to an empty schematic)
--talos-version string the desired talos version (default "latest")
--talosconfig-destination string The location to save the generated Talos configuration file to. Defaults to 'TALOSCONFIG' env variable if set, otherwise '$HOME/.talos/config' and '/var/run/secrets/talos.dev/config' in order.
--workers int the number of workers to create (default 1)
```
### Options inherited from parent commands
```
--name string the name of the cluster (default "talos-default")
--state string directory path to store cluster state (default "/home/user/.talos/clusters")
```
### SEE ALSO
* [talosctl cluster create](#talosctl-cluster-create) - Creates a local qemu based cluster for Talos development
## talosctl cluster create
## talosctl cluster create dev
Creates a local qemu based cluster for Talos development
```
talosctl cluster create [flags]
talosctl cluster create dev [flags]
```
### Options
@ -258,7 +162,7 @@ talosctl cluster create [flags]
--extra-disks-drivers strings driver for each extra disk (virtio, ide, ahci, scsi, nvme, megaraid)
--extra-disks-size int default limit on disk size in MB (each VM) (default 5120)
--extra-uefi-search-paths strings additional search paths for UEFI firmware (only applies when UEFI is enabled)
-h, --help help for create
-h, --help help for dev
--image-cache-path string path to image cache
--image-cache-port uint16 port on which to serve image cache (default 5000)
--image-cache-tls-cert-file string path to image cache TLS cert
@ -326,13 +230,103 @@ talosctl cluster create [flags]
### SEE ALSO
* [talosctl cluster](#talosctl-cluster) - A collection of commands for managing local docker-based or QEMU-based clusters
* [talosctl cluster create docker](#talosctl-cluster-create-docker) - Create a local Docker based kubernetes cluster
* [talosctl cluster create qemu](#talosctl-cluster-create-qemu) - Create a local QEMU based Talos cluster
* [talosctl cluster create](#talosctl-cluster-create) - Creates a local qemu based cluster for Talos development
## talosctl cluster create docker
Create a local Docker based kubernetes cluster
```
talosctl cluster create docker [flags]
```
### Options
```
--config-patch stringArray patch generated machineconfigs (applied to all node types), use @file to read a patch from file
--config-patch-controlplanes stringArray patch generated machineconfigs (applied to 'controlplane' type)
--config-patch-workers stringArray patch generated machineconfigs (applied to 'worker' type)
--cpus-controlplanes string the share of CPUs as fraction for each control plane/VM (default "2.0")
--cpus-workers string the share of CPUs as fraction for each worker/VM (default "2.0")
-p, --exposed-ports string comma-separated list of ports/protocols to expose on init node. Ex -p <hostPort>:<containerPort>/<protocol (tcp or udp)>
-h, --help help for docker
--host-ip string Host IP to forward exposed ports to (default "0.0.0.0")
--image string the talos image to run (default "ghcr.io/siderolabs/talos:latest")
--kubernetes-version string desired kubernetes version to run (default "1.34.1")
--memory-controlplanes string(mb,gb) the limit on memory usage for each control plane/VM (default 2.0GiB)
--memory-workers string(mb,gb) the limit on memory usage for each worker/VM (default 2.0GiB)
--mount mount attach a mount to the container (docker --mount syntax)
--subnet string Docker network subnet CIDR (default "10.5.0.0/24")
--talosconfig-destination string The location to save the generated Talos configuration file to. Defaults to 'TALOSCONFIG' env variable if set, otherwise '$HOME/.talos/config' and '/var/run/secrets/talos.dev/config' in order.
--workers int the number of workers to create (default 1)
```
### Options inherited from parent commands
```
--name string the name of the cluster (default "talos-default")
--state string directory path to store cluster state (default "/home/user/.talos/clusters")
```
### SEE ALSO
* [talosctl cluster create](#talosctl-cluster-create) - Creates a local qemu based cluster for Talos development
## talosctl cluster create qemu
Create a local QEMU based Talos cluster
Available presets:
- iso: Configure Talos to boot from an ISO from the Image Factory.
- iso-secureboot: Configure Talos for Secureboot via ISO. Only available on Linux hosts.
- pxe: Configure Talos to boot via PXE from the Image Factory.
- disk-image: Configure Talos to boot from a disk image from the Image Factory.
- maintenance: Skip applying machine configuration and leave the machines in maintenance mode. The machine configuration files are written to the working directory.
Note: exactly one of 'iso', 'iso-secureboot', 'pxe' or 'disk-image' presets must be specified.
```
talosctl cluster create qemu [flags]
```
### Options
```
--cidr string CIDR of the cluster network (default "10.5.0.0/24")
--config-patch stringArray patch generated machineconfigs (applied to all node types), use @file to read a patch from file
--config-patch-controlplanes stringArray patch generated machineconfigs (applied to 'controlplane' type)
--config-patch-workers stringArray patch generated machineconfigs (applied to 'worker' type)
--controlplanes int the number of controlplanes to create (default 1)
--cpus-controlplanes string the share of CPUs as fraction for each control plane/VM (default "2.0")
--cpus-workers string the share of CPUs as fraction for each worker/VM (default "2.0")
--disks disks list of disks to create in format "<driver1>:<size1>" (disks after the first one are added only to worker machines) (default virtio:10GiB,virtio:6GiB)
-h, --help help for qemu
--image-factory-url string image factory url (default "https://factory.talos.dev/")
--kubernetes-version string desired kubernetes version to run (default "1.34.1")
--memory-controlplanes string(mb,gb) the limit on memory usage for each control plane/VM (default 2.0GiB)
--memory-workers string(mb,gb) the limit on memory usage for each worker/VM (default 2.0GiB)
--omni-api-endpoint string the Omni API endpoint (must include a scheme, a port and a join token)
--presets strings list of presets to apply (default [iso])
--schematic-id string image factory schematic id (defaults to an empty schematic)
--talos-version string the desired talos version (default "latest")
--talosconfig-destination string The location to save the generated Talos configuration file to. Defaults to 'TALOSCONFIG' env variable if set, otherwise '$HOME/.talos/config' and '/var/run/secrets/talos.dev/config' in order.
--workers int the number of workers to create (default 1)
```
### Options inherited from parent commands
```
--name string the name of the cluster (default "talos-default")
--state string directory path to store cluster state (default "/home/user/.talos/clusters")
```
### SEE ALSO
* [talosctl cluster create](#talosctl-cluster-create) - Creates a local qemu based cluster for Talos development
## talosctl cluster destroy
Destroys a local docker-based or firecracker-based kubernetes cluster
Destroys a local Talos kubernetes cluster
```
talosctl cluster destroy [flags]
@ -343,7 +337,6 @@ talosctl cluster destroy [flags]
```
-f, --force force deletion of cluster directory if there were errors
-h, --help help for destroy
--provisioner string Talos cluster provisioner to use (default "docker")
--save-cluster-logs-archive-path string save cluster logs archive to the specified file on destroy
--save-support-archive-path string save support archive to the specified file on destroy
```
@ -400,8 +393,7 @@ A collection of commands for managing local docker-based or QEMU-based clusters
### SEE ALSO
* [talosctl](#talosctl) - A CLI for out-of-band management of Kubernetes nodes created by Talos
* [talosctl cluster create](#talosctl-cluster-create) - Creates a local qemu based cluster for Talos development
* [talosctl cluster destroy](#talosctl-cluster-destroy) - Destroys a local docker-based or firecracker-based kubernetes cluster
* [talosctl cluster destroy](#talosctl-cluster-destroy) - Destroys a local Talos kubernetes cluster
* [talosctl cluster show](#talosctl-cluster-show) - Shows info about a local provisioned kubernetes cluster
## talosctl completion