mirror of https://github.com/siderolabs/talos.git
Uses `siderolabs/go-blockdevice/v2` for all the hard parts and provides a new resource, `Disk`, which describes every disk in the system. An additional resource, `SystemDisk`, always points to the system disk (based on the location of the `META` partition). The `Disks` API (and `talosctl disks`) now provides a view into `talosctl get disks` to keep backwards compatibility. The QEMU provisioner can now create extra disks of various types (IDE, AHCI, SCSI, NVMe), which allows disk detection to be tested properly. The new resource will be the foundation for volume provisioning (picking the disk to provision a volume on).

Example:

```
talosctl -n 172.20.0.5 get disks
NODE         NAMESPACE   TYPE   ID        VERSION   SIZE          READ ONLY   TRANSPORT   ROTATIONAL   WWID                                                                MODEL            SERIAL
172.20.0.5   runtime     Disk   loop0     1         65568768      true
172.20.0.5   runtime     Disk   nvme0n1   1         10485760000   false       nvme                     nvme.1b36-6465616462656566-51454d55204e564d65204374726c-00000001   QEMU NVMe Ctrl   deadbeef
172.20.0.5   runtime     Disk   sda       1         10485760000   false       virtio      true                                                                             QEMU HARDDISK
172.20.0.5   runtime     Disk   sdb       1         10485760000   false       sata        true         t10.ATA     QEMU HARDDISK                        QM00013            QEMU HARDDISK
172.20.0.5   runtime     Disk   sdc       1         10485760000   false       sata        true         t10.ATA     QEMU HARDDISK                        QM00001            QEMU HARDDISK
172.20.0.5   runtime     Disk   vda       1         12884901888   false       virtio      true
```

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
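For illustration only (not part of this change): a minimal Go sketch of how a script might consume the new `Disk` resources by shelling out to `talosctl get disks -o json` and decoding the stream of JSON documents it prints, one per resource. The node address and the `spec` field names used below (`transport`, `model`) are assumptions based on the example output above.

```go
// Minimal sketch: list Disk resources via `talosctl get disks -o json`.
// Assumes talosctl is on PATH and already configured; the "spec" field names
// ("transport", "model") are guesses based on the table columns above.
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"os/exec"
)

// disk captures only the parts of each resource document we care about here.
type disk struct {
	Metadata struct {
		ID string `json:"id"`
	} `json:"metadata"`
	Spec map[string]any `json:"spec"`
}

func main() {
	cmd := exec.Command("talosctl", "-n", "172.20.0.5", "get", "disks", "-o", "json")

	out, err := cmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	// The command prints one JSON document per resource; json.Decoder
	// handles the concatenated stream.
	dec := json.NewDecoder(out)

	for {
		var d disk

		if err := dec.Decode(&d); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}

			log.Fatal(err)
		}

		fmt.Printf("%s: transport=%v model=%v\n", d.Metadata.ID, d.Spec["transport"], d.Spec["model"])
	}

	if err := cmd.Wait(); err != nil {
		log.Fatal(err)
	}
}
```

The same data is served by the machine API's resource service; going through the CLI here simply keeps the example dependency-free.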
35 lines
861 B
Go
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//go:build integration_cli

package cli

import (
	"github.com/siderolabs/talos/internal/integration/base"
)

// DisksSuite verifies the disks command.
type DisksSuite struct {
	base.CLISuite
}

// SuiteName ...
func (suite *DisksSuite) SuiteName() string {
	return "cli.DisksSuite"
}

// TestSuccess runs the command with success.
func (suite *DisksSuite) TestSuccess() {
	if suite.Cluster != nil && suite.Cluster.Provisioner() == "docker" {
		suite.T().Skip("docker provisioner doesn't support disks command")
	}

	suite.RunCLI([]string{"disks", "--nodes", suite.RandomDiscoveredNodeInternalIP()})
}

func init() {
	allSuites = append(allSuites, new(DisksSuite))
}
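The suite above only checks that the legacy `talosctl disks` command exits successfully. A hypothetical companion test (not part of this file) could also exercise the new resource-based view; the sketch below assumes a `base.StdoutShouldMatch` run option exists in this integration test base and that it would live in another file of the same `cli` package.

```go
//go:build integration_cli

package cli

import (
	"regexp"

	"github.com/siderolabs/talos/internal/integration/base"
)

// TestGetDisks is a hypothetical companion test: it exercises the
// resource-based view (`talosctl get disks`) introduced by this change and
// assumes base.StdoutShouldMatch is available as a RunCLI option.
func (suite *DisksSuite) TestGetDisks() {
	if suite.Cluster != nil && suite.Cluster.Provisioner() == "docker" {
		suite.T().Skip("docker provisioner doesn't support disk discovery")
	}

	suite.RunCLI([]string{"get", "disks", "--nodes", suite.RandomDiscoveredNodeInternalIP()},
		base.StdoutShouldMatch(regexp.MustCompile(`TRANSPORT`)),
	)
}
```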