mirror of
https://github.com/siderolabs/talos.git
synced 2025-11-28 22:21:34 +01:00
chore: simplify code
- replace `interface{}` with `any` using `gofmt -r 'interface{} -> any' -w`
- replace `a = []T{}` with `var a []T` where possible.
- replace `a = []T{}` with `a = make([]T, 0, len(b))` where possible.
Signed-off-by: Dmitriy Matrenichev <dmitry.matrenichev@siderolabs.com>
This commit is contained in:
parent
963612bcca
commit
dad9c40c73
@ -129,7 +129,7 @@ func NewManifest(mode Mode, uefiOnlyBoot bool, bootLoaderPresent bool, opts *Opt
|
||||
manifest.Targets[opts.Disk] = []*Target{}
|
||||
}
|
||||
|
||||
targets := []*Target{}
|
||||
var targets []*Target
|
||||
|
||||
// create GRUB BIOS+UEFI partitions, or only one big EFI partition if not using GRUB
|
||||
if !uefiOnlyBoot {
|
||||
|
||||
@ -29,7 +29,7 @@ var dhcpdLaunchCmd = &cobra.Command{
|
||||
Args: cobra.NoArgs,
|
||||
Hidden: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ips := []net.IP{}
|
||||
var ips []net.IP
|
||||
|
||||
for _, ip := range strings.Split(dhcpdLaunchCmdFlags.addr, ",") {
|
||||
ips = append(ips, net.ParseIP(ip))
|
||||
|
||||
@ -49,7 +49,7 @@ var genCSRCmd = &cobra.Command{
|
||||
return fmt.Errorf("error parsing ECDSA key: %s", err)
|
||||
}
|
||||
|
||||
opts := []x509.Option{}
|
||||
var opts []x509.Option
|
||||
|
||||
parsed := net.ParseIP(genCSRCmdFlags.ip)
|
||||
if parsed == nil {
|
||||
|
||||
@ -20,14 +20,14 @@ var genKeypairCmdFlags struct {
|
||||
organization string
|
||||
}
|
||||
|
||||
// genKeypairCmd represents the `gen keypair` command.
|
||||
var genKeypairCmd = &cobra.Command{
|
||||
Use: "keypair",
|
||||
Short: "Generates an X.509 Ed25519 key pair",
|
||||
Long: ``,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts := []x509.Option{}
|
||||
var opts []x509.Option
|
||||
|
||||
if genKeypairCmdFlags.ip != "" {
|
||||
parsed := net.ParseIP(genKeypairCmdFlags.ip)
|
||||
if parsed == nil {
|
||||
|
||||
@ -92,7 +92,7 @@ func printDisks(ctx context.Context, c *client.Client) error {
|
||||
}
|
||||
}
|
||||
|
||||
args := []interface{}{}
|
||||
var args []any
|
||||
|
||||
if node != "" {
|
||||
args = append(args, node)
|
||||
@ -110,7 +110,7 @@ func printDisks(ctx context.Context, c *client.Client) error {
|
||||
isSystemDisk = "*"
|
||||
}
|
||||
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
getWithPlaceholder(disk.DeviceName),
|
||||
getWithPlaceholder(disk.Model),
|
||||
getWithPlaceholder(disk.Serial),
|
||||
|
||||
@ -89,7 +89,7 @@ var duCmd = &cobra.Command{
|
||||
|
||||
size := stringifySize(info.Size)
|
||||
|
||||
args := []interface{}{
|
||||
args := []any{
|
||||
size, info.RelativeName,
|
||||
}
|
||||
|
||||
@ -109,7 +109,7 @@ var duCmd = &cobra.Command{
|
||||
|
||||
if multipleNodes {
|
||||
pattern = "%s\t%s\t%s\n"
|
||||
args = append([]interface{}{node}, args...)
|
||||
args = append([]any{node}, args...)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, pattern, args...)
|
||||
|
||||
@ -67,12 +67,12 @@ func displayAlarms(messages []alarmMessage) error {
|
||||
fmt.Fprintln(w, header)
|
||||
}
|
||||
|
||||
args := []interface{}{
|
||||
args := []any{
|
||||
etcdresource.FormatMemberID(alarm.GetMemberId()),
|
||||
alarm.GetAlarm().String(),
|
||||
}
|
||||
if node != "" {
|
||||
args = append([]interface{}{node}, args...)
|
||||
args = append([]any{node}, args...)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, pattern, args...)
|
||||
@ -233,7 +233,7 @@ var etcdMemberListCmd = &cobra.Command{
|
||||
}
|
||||
}
|
||||
|
||||
args := []interface{}{
|
||||
args := []any{
|
||||
etcdresource.FormatMemberID(member.Id),
|
||||
member.Hostname,
|
||||
strings.Join(member.PeerUrls, ","),
|
||||
@ -241,7 +241,7 @@ var etcdMemberListCmd = &cobra.Command{
|
||||
member.IsLearner,
|
||||
}
|
||||
if node != "" {
|
||||
args = append([]interface{}{node}, args...)
|
||||
args = append([]any{node}, args...)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, pattern, args...)
|
||||
@ -292,7 +292,7 @@ var etcdStatusCmd = &cobra.Command{
|
||||
ratio = float64(message.GetMemberStatus().GetDbSizeInUse()) / float64(message.GetMemberStatus().GetDbSize()) * 100.0
|
||||
}
|
||||
|
||||
args := []interface{}{
|
||||
args := []any{
|
||||
etcdresource.FormatMemberID(message.GetMemberStatus().GetMemberId()),
|
||||
humanize.Bytes(uint64(message.GetMemberStatus().GetDbSize())),
|
||||
humanize.Bytes(uint64(message.GetMemberStatus().GetDbSizeInUse())),
|
||||
@ -305,7 +305,7 @@ var etcdStatusCmd = &cobra.Command{
|
||||
strings.Join(message.GetMemberStatus().GetErrors(), ", "),
|
||||
}
|
||||
if node != "" {
|
||||
args = append([]interface{}{node}, args...)
|
||||
args = append([]any{node}, args...)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, pattern, args...)
|
||||
|
||||
@ -38,7 +38,7 @@ var eventsCmd = &cobra.Command{
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
|
||||
fmt.Fprintln(w, "NODE\tID\tEVENT\tACTOR\tSOURCE\tMESSAGE")
|
||||
|
||||
opts := []client.EventsOptionFunc{}
|
||||
var opts []client.EventsOptionFunc
|
||||
|
||||
if eventsCmdFlags.tailEvents != 0 {
|
||||
opts = append(opts, client.WithTailEvents(eventsCmdFlags.tailEvents))
|
||||
@ -73,30 +73,30 @@ var eventsCmd = &cobra.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
var args []interface{}
|
||||
var args []any
|
||||
|
||||
switch msg := event.Payload.(type) {
|
||||
case *machine.SequenceEvent:
|
||||
args = []interface{}{msg.GetSequence()}
|
||||
args = []any{msg.GetSequence()}
|
||||
if msg.Error != nil {
|
||||
args = append(args, "error:"+" "+msg.GetError().GetMessage())
|
||||
} else {
|
||||
args = append(args, msg.GetAction().String())
|
||||
}
|
||||
case *machine.PhaseEvent:
|
||||
args = []interface{}{msg.GetPhase(), msg.GetAction().String()}
|
||||
args = []any{msg.GetPhase(), msg.GetAction().String()}
|
||||
case *machine.TaskEvent:
|
||||
args = []interface{}{msg.GetTask(), msg.GetAction().String()}
|
||||
args = []any{msg.GetTask(), msg.GetAction().String()}
|
||||
case *machine.ServiceStateEvent:
|
||||
args = []interface{}{msg.GetService(), fmt.Sprintf("%s: %s", msg.GetAction(), msg.GetMessage())}
|
||||
args = []any{msg.GetService(), fmt.Sprintf("%s: %s", msg.GetAction(), msg.GetMessage())}
|
||||
case *machine.ConfigLoadErrorEvent:
|
||||
args = []interface{}{"error", msg.GetError()}
|
||||
args = []any{"error", msg.GetError()}
|
||||
case *machine.ConfigValidationErrorEvent:
|
||||
args = []interface{}{"error", msg.GetError()}
|
||||
args = []any{"error", msg.GetError()}
|
||||
case *machine.AddressEvent:
|
||||
args = []interface{}{msg.GetHostname(), fmt.Sprintf("ADDRESSES: %s", strings.Join(msg.GetAddresses(), ","))}
|
||||
args = []any{msg.GetHostname(), fmt.Sprintf("ADDRESSES: %s", strings.Join(msg.GetAddresses(), ","))}
|
||||
case *machine.MachineStatusEvent:
|
||||
args = []interface{}{
|
||||
args = []any{
|
||||
msg.GetStage().String(),
|
||||
fmt.Sprintf("ready: %v, unmet conditions: %v",
|
||||
msg.GetStatus().Ready,
|
||||
@ -109,7 +109,7 @@ var eventsCmd = &cobra.Command{
|
||||
}
|
||||
}
|
||||
|
||||
args = append([]interface{}{event.Node, event.ID, event.TypeURL, event.ActorID}, args...)
|
||||
args = append([]any{event.Node, event.ID, event.TypeURL, event.ActorID}, args...)
|
||||
fmt.Fprintf(w, format, args...)
|
||||
|
||||
return w.Flush()
|
||||
|
||||
@ -281,7 +281,7 @@ func (n *netstat) printNetstat(response *machine.NetstatResponse) error {
|
||||
}
|
||||
}
|
||||
|
||||
args := []interface{}{}
|
||||
var args []any
|
||||
|
||||
if node != "" {
|
||||
args = append(args, node)
|
||||
@ -292,7 +292,7 @@ func (n *netstat) printNetstat(response *machine.NetstatResponse) error {
|
||||
state = record.State.String()
|
||||
}
|
||||
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
record.L4Proto,
|
||||
strconv.FormatUint(record.Rxqueue, 10),
|
||||
strconv.FormatUint(record.Txqueue, 10),
|
||||
@ -302,7 +302,7 @@ func (n *netstat) printNetstat(response *machine.NetstatResponse) error {
|
||||
}...)
|
||||
|
||||
if netstatCmdFlags.extend {
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
strconv.FormatUint(uint64(record.Uid), 10),
|
||||
strconv.FormatUint(record.Inode, 10),
|
||||
}...)
|
||||
@ -310,11 +310,11 @@ func (n *netstat) printNetstat(response *machine.NetstatResponse) error {
|
||||
|
||||
if netstatCmdFlags.pid {
|
||||
if record.Process.Pid != 0 {
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
fmt.Sprintf("%d/%s", record.Process.Pid, record.Process.Name),
|
||||
}...)
|
||||
} else {
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
"-",
|
||||
}...)
|
||||
}
|
||||
@ -322,11 +322,11 @@ func (n *netstat) printNetstat(response *machine.NetstatResponse) error {
|
||||
|
||||
if netstatCmdFlags.pods {
|
||||
if record.Netns == "" || node == "" || n.NodeNetNSPods[node] == nil {
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
"-",
|
||||
}...)
|
||||
} else {
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
n.NodeNetNSPods[node][record.Netns],
|
||||
}...)
|
||||
}
|
||||
@ -335,7 +335,7 @@ func (n *netstat) printNetstat(response *machine.NetstatResponse) error {
|
||||
if netstatCmdFlags.timers {
|
||||
timerwhen := strconv.FormatFloat(float64(record.Timerwhen)/100, 'f', 2, 64)
|
||||
|
||||
args = append(args, []interface{}{
|
||||
args = append(args, []any{
|
||||
fmt.Sprintf("%s (%s/%d/%d)", strings.ToLower(record.Tr.String()), timerwhen, record.Retrnsmt, record.Timeout),
|
||||
}...)
|
||||
}
|
||||
|
||||
@ -38,7 +38,7 @@ func (j *JSON) WriteHeader(definition *meta.ResourceDefinition, withEvents bool)
|
||||
}
|
||||
|
||||
// prepareEncodableData prepares the data of a resource to be encoded as JSON and populates it with some extra information.
|
||||
func (j *JSON) prepareEncodableData(node string, r resource.Resource, event state.EventType) (map[string]interface{}, error) {
|
||||
func (j *JSON) prepareEncodableData(node string, r resource.Resource, event state.EventType) (map[string]any, error) {
|
||||
if r.Metadata().Type() == config.MachineConfigType {
|
||||
r = &mcYamlRepr{r}
|
||||
}
|
||||
@ -53,7 +53,7 @@ func (j *JSON) prepareEncodableData(node string, r resource.Resource, event stat
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var data map[string]interface{}
|
||||
var data map[string]any
|
||||
|
||||
err = yaml.Unmarshal(yamlBytes, &data)
|
||||
if err != nil {
|
||||
@ -69,7 +69,7 @@ func (j *JSON) prepareEncodableData(node string, r resource.Resource, event stat
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func writeAsIndentedJSON(wr io.Writer, data interface{}) error {
|
||||
func writeAsIndentedJSON(wr io.Writer, data any) error {
|
||||
enc := json.NewEncoder(wr)
|
||||
enc.SetIndent("", " ")
|
||||
|
||||
|
||||
@ -27,7 +27,7 @@ type Table struct {
|
||||
dynamicColumns []dynamicColumn
|
||||
}
|
||||
|
||||
type dynamicColumn func(value interface{}) (string, error)
|
||||
type dynamicColumn func(value any) (string, error)
|
||||
|
||||
// NewTable initializes table resource output.
|
||||
func NewTable(writer io.Writer) *Table {
|
||||
@ -60,7 +60,7 @@ func (table *Table) WriteHeader(definition *meta.ResourceDefinition, withEvents
|
||||
|
||||
expr = expr.AllowMissingKeys(true)
|
||||
|
||||
table.dynamicColumns = append(table.dynamicColumns, func(val interface{}) (string, error) {
|
||||
table.dynamicColumns = append(table.dynamicColumns, func(val any) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
if e := expr.Execute(&buf, val); e != nil {
|
||||
@ -104,7 +104,7 @@ func (table *Table) WriteResource(node string, r resource.Resource, event state.
|
||||
return err
|
||||
}
|
||||
|
||||
var unstructured interface{}
|
||||
var unstructured any
|
||||
|
||||
if err = yaml.Unmarshal(yml, &unstructured); err != nil {
|
||||
return err
|
||||
|
||||
@ -188,7 +188,7 @@ func processesOutput(ctx context.Context, c *client.Client) (output string, err
|
||||
|
||||
defaultNode := client.AddrFromPeer(&remotePeer)
|
||||
|
||||
s := []string{}
|
||||
var s []string
|
||||
|
||||
s = append(s, "NODE | PID | STATE | THREADS | CPU-TIME | VIRTMEM | RESMEM | LABEL | COMMAND")
|
||||
|
||||
|
||||
@ -61,7 +61,8 @@ func NewWireguardConfigBundle(ips []netip.Addr, wireguardCidr string, listenPort
|
||||
|
||||
config := &v1alpha1.DeviceWireguardConfig{}
|
||||
|
||||
currentPeers := []*v1alpha1.DeviceWireguardPeer{}
|
||||
var currentPeers []*v1alpha1.DeviceWireguardPeer
|
||||
|
||||
// add all peers except self
|
||||
for _, peer := range peers {
|
||||
if peer.PublicKey() != keys[i].PublicKey().String() {
|
||||
|
||||
@ -18,7 +18,7 @@ func AppendErrors(err error, errs ...error) error {
|
||||
res := multierror.Append(err, errs...)
|
||||
|
||||
res.ErrorFormat = func(errs []error) string {
|
||||
lines := []string{}
|
||||
var lines []string
|
||||
|
||||
for _, err := range errs {
|
||||
lines = append(lines, fmt.Sprintf(" %s", err.Error()))
|
||||
|
||||
@ -65,7 +65,8 @@ func removeComments(node *yaml.Node) {
|
||||
}
|
||||
|
||||
func stripManual(b []byte) []byte {
|
||||
stripped := []byte{}
|
||||
var stripped []byte
|
||||
|
||||
lines := bytes.Split(b, []byte("\n"))
|
||||
|
||||
for i, line := range lines {
|
||||
|
||||
@ -36,7 +36,7 @@ func GetAWSDefaultRegions() ([]string, error) {
|
||||
return nil, fmt.Errorf("failed getting list of regions: %w", err)
|
||||
}
|
||||
|
||||
regions := []string{}
|
||||
var regions []string
|
||||
|
||||
for _, r := range result.Regions {
|
||||
if r.OptInStatus != nil {
|
||||
|
||||
@ -198,7 +198,8 @@ type aliasType struct {
|
||||
}
|
||||
|
||||
func collectStructs(node ast.Node) ([]*structType, map[string]aliasType) {
|
||||
structs := []*structType{}
|
||||
var structs []*structType
|
||||
|
||||
aliases := map[string]aliasType{}
|
||||
|
||||
collectStructs := func(n ast.Node) bool {
|
||||
@ -306,7 +307,7 @@ func parseComment(comment []byte) *Text {
|
||||
return text
|
||||
}
|
||||
|
||||
func getFieldType(p interface{}) string {
|
||||
func getFieldType(p any) string {
|
||||
if m, ok := p.(*ast.MapType); ok {
|
||||
return getFieldType(m.Value)
|
||||
}
|
||||
@ -325,7 +326,7 @@ func getFieldType(p interface{}) string {
|
||||
}
|
||||
}
|
||||
|
||||
func formatFieldType(p interface{}) string {
|
||||
func formatFieldType(p any) string {
|
||||
if m, ok := p.(*ast.MapType); ok {
|
||||
return fmt.Sprintf("map[%s]%s", formatFieldType(m.Key), formatFieldType(m.Value))
|
||||
}
|
||||
|
||||
@ -145,7 +145,7 @@ func mountRootFS() error {
|
||||
image string
|
||||
}
|
||||
|
||||
layers := []layer{}
|
||||
var layers []layer
|
||||
|
||||
squashfs := mount.NewMountPoints()
|
||||
|
||||
|
||||
@ -1055,7 +1055,8 @@ func (s *Server) Mounts(ctx context.Context, in *emptypb.Empty) (reply *machine.
|
||||
multiErr *multierror.Error
|
||||
)
|
||||
|
||||
stats := []*machine.MountStat{}
|
||||
var stats []*machine.MountStat
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
|
||||
for scanner.Scan() {
|
||||
@ -1409,7 +1410,7 @@ func (s *Server) Containers(ctx context.Context, in *machine.ContainersRequest)
|
||||
log.Println(err.Error())
|
||||
}
|
||||
|
||||
containers := []*machine.ContainerInfo{}
|
||||
var containers []*machine.ContainerInfo
|
||||
|
||||
for _, pod := range pods {
|
||||
for _, container := range pod.Containers {
|
||||
@ -1457,7 +1458,7 @@ func (s *Server) Stats(ctx context.Context, in *machine.StatsRequest) (reply *ma
|
||||
log.Println(err.Error())
|
||||
}
|
||||
|
||||
stats := []*machine.Stat{}
|
||||
var stats []*machine.Stat
|
||||
|
||||
for _, pod := range pods {
|
||||
for _, container := range pod.Containers {
|
||||
|
||||
@ -46,7 +46,7 @@ func (a staticPod) SetPod(podSpec *v1.Pod) error {
|
||||
return err
|
||||
}
|
||||
|
||||
a.StaticPod.TypedSpec().Pod = map[string]interface{}{}
|
||||
a.StaticPod.TypedSpec().Pod = map[string]any{}
|
||||
|
||||
return json.Unmarshal(jsonSerialized, &a.StaticPod.TypedSpec().Pod)
|
||||
}
|
||||
|
||||
@ -32,7 +32,7 @@ func (a staticPodStatus) SetStatus(status *v1.PodStatus) error {
|
||||
return err
|
||||
}
|
||||
|
||||
a.StaticPodStatus.TypedSpec().PodStatus = map[string]interface{}{}
|
||||
a.StaticPodStatus.TypedSpec().PodStatus = map[string]any{}
|
||||
|
||||
return json.Unmarshal(jsonSerialized, &a.StaticPodStatus.TypedSpec().PodStatus)
|
||||
}
|
||||
|
||||
@ -93,7 +93,7 @@ func (ctrl *NodeIdentityController) Run(ctx context.Context, r controller.Runtim
|
||||
|
||||
var localIdentity cluster.IdentitySpec
|
||||
|
||||
if err := controllers.LoadOrNewFromFile(filepath.Join(ctrl.StatePath, constants.NodeIdentityFilename), &localIdentity, func(v interface{}) error {
|
||||
if err := controllers.LoadOrNewFromFile(filepath.Join(ctrl.StatePath, constants.NodeIdentityFilename), &localIdentity, func(v any) error {
|
||||
return clusteradapter.IdentitySpec(v.(*cluster.IdentitySpec)).Generate()
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error caching node identity: %w", err)
|
||||
|
||||
@ -28,17 +28,17 @@ func (suite *CRISeccompProfileFileSuite) TestReconcileSeccompProfileFile() {
|
||||
|
||||
for _, tt := range []struct {
|
||||
seccompProfileName string
|
||||
seccompProfileValue map[string]interface{}
|
||||
seccompProfileValue map[string]any
|
||||
}{
|
||||
{
|
||||
seccompProfileName: "audit.json",
|
||||
seccompProfileValue: map[string]interface{}{
|
||||
seccompProfileValue: map[string]any{
|
||||
"defaultAction": "SCMP_ACT_LOG",
|
||||
},
|
||||
},
|
||||
{
|
||||
seccompProfileName: "deny.json",
|
||||
seccompProfileValue: map[string]interface{}{
|
||||
seccompProfileValue: map[string]any{
|
||||
"defaultAction": "SCMP_ACT_ERRNO",
|
||||
},
|
||||
},
|
||||
|
||||
@ -27,7 +27,7 @@ func (suite *CRISeccompProfileSuite) TestReconcileSeccompProfile() {
|
||||
{
|
||||
MachineSeccompProfileName: "audit.json",
|
||||
MachineSeccompProfileValue: v1alpha1.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"defaultAction": "SCMP_ACT_LOG",
|
||||
},
|
||||
},
|
||||
@ -35,7 +35,7 @@ func (suite *CRISeccompProfileSuite) TestReconcileSeccompProfile() {
|
||||
{
|
||||
MachineSeccompProfileName: "deny.json",
|
||||
MachineSeccompProfileValue: v1alpha1.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"defaultAction": "SCMP_ACT_ERRNO",
|
||||
},
|
||||
},
|
||||
@ -48,17 +48,17 @@ func (suite *CRISeccompProfileSuite) TestReconcileSeccompProfile() {
|
||||
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
value map[string]interface{}
|
||||
value map[string]any
|
||||
}{
|
||||
{
|
||||
name: "audit.json",
|
||||
value: map[string]interface{}{
|
||||
value: map[string]any{
|
||||
"defaultAction": "SCMP_ACT_LOG",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deny.json",
|
||||
value: map[string]interface{}{
|
||||
value: map[string]any{
|
||||
"defaultAction": "SCMP_ACT_ERRNO",
|
||||
},
|
||||
},
|
||||
@ -92,7 +92,7 @@ func (suite *CRISeccompProfileSuite) TestReconcileSeccompProfile() {
|
||||
{
|
||||
MachineSeccompProfileName: "audit.json",
|
||||
MachineSeccompProfileValue: v1alpha1.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"defaultAction": "SCMP_ACT_LOG",
|
||||
},
|
||||
},
|
||||
|
||||
@ -28,7 +28,7 @@ type EtcFileController struct {
|
||||
ShadowPath string
|
||||
|
||||
// Cache of bind mounts created.
|
||||
bindMounts map[string]interface{}
|
||||
bindMounts map[string]any
|
||||
}
|
||||
|
||||
// Name implements controller.Controller interface.
|
||||
@ -62,7 +62,7 @@ func (ctrl *EtcFileController) Outputs() []controller.Output {
|
||||
//nolint:gocyclo,cyclop
|
||||
func (ctrl *EtcFileController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
|
||||
if ctrl.bindMounts == nil {
|
||||
ctrl.bindMounts = make(map[string]interface{})
|
||||
ctrl.bindMounts = make(map[string]any)
|
||||
}
|
||||
|
||||
for {
|
||||
|
||||
@ -175,7 +175,7 @@ func NewCRDController(
|
||||
|
||||
if _, err = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: controller.enqueueTalosSA,
|
||||
UpdateFunc: func(oldTalosSA, newTalosSA interface{}) {
|
||||
UpdateFunc: func(oldTalosSA, newTalosSA any) {
|
||||
controller.enqueueTalosSA(newTalosSA)
|
||||
},
|
||||
}); err != nil {
|
||||
@ -184,7 +184,7 @@ func NewCRDController(
|
||||
|
||||
if _, err = secrets.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: controller.handleSecret,
|
||||
UpdateFunc: func(oldSec, newSec interface{}) {
|
||||
UpdateFunc: func(oldSec, newSec any) {
|
||||
newSecret := newSec.(*corev1.Secret) //nolint:errcheck
|
||||
oldSecret := oldSec.(*corev1.Secret) //nolint:errcheck
|
||||
|
||||
@ -259,7 +259,7 @@ func (t *CRDController) processNextWorkItem(ctx context.Context) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
err := func(obj interface{}) error {
|
||||
err := func(obj any) error {
|
||||
defer t.queue.Done(obj)
|
||||
|
||||
var key string
|
||||
@ -428,7 +428,7 @@ func (t *CRDController) syncHandler(ctx context.Context, key string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *CRDController) enqueueTalosSA(obj interface{}) {
|
||||
func (t *CRDController) enqueueTalosSA(obj any) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
@ -439,7 +439,7 @@ func (t *CRDController) enqueueTalosSA(obj interface{}) {
|
||||
t.queue.Add(key)
|
||||
}
|
||||
|
||||
func (t *CRDController) handleSecret(obj interface{}) {
|
||||
func (t *CRDController) handleSecret(obj any) {
|
||||
var object metav1.Object
|
||||
|
||||
var ok bool
|
||||
|
||||
@ -107,7 +107,7 @@ func (ctrl *IdentityController) Run(ctx context.Context, r controller.Runtime, l
|
||||
if cfg != nil && firstMAC != nil && cfg.(*kubespan.Config).TypedSpec().Enabled {
|
||||
var localIdentity kubespan.IdentitySpec
|
||||
|
||||
if err = controllers.LoadOrNewFromFile(filepath.Join(ctrl.StatePath, constants.KubeSpanIdentityFilename), &localIdentity, func(v interface{}) error {
|
||||
if err = controllers.LoadOrNewFromFile(filepath.Join(ctrl.StatePath, constants.KubeSpanIdentityFilename), &localIdentity, func(v any) error {
|
||||
return kubespanadapter.IdentitySpec(v.(*kubespan.IdentitySpec)).GenerateKey()
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error caching kubespan identity: %w", err)
|
||||
|
||||
@ -70,7 +70,7 @@ func (ctrl *AddressEventController) Run(ctx context.Context, r controller.Runtim
|
||||
case <-r.EventCh():
|
||||
}
|
||||
|
||||
addresses := []string{}
|
||||
var addresses []string
|
||||
|
||||
nodeAddr, err := r.Get(
|
||||
ctx,
|
||||
|
||||
@ -156,7 +156,7 @@ func (suite *LinkStatusSuite) TestInterfaceHwInfo() {
|
||||
continue
|
||||
}
|
||||
|
||||
emptyFields := []string{}
|
||||
var emptyFields []string
|
||||
|
||||
for key, value := range map[string]string{
|
||||
"hw addr": spec.HardwareAddr.String(),
|
||||
|
||||
@ -217,18 +217,18 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// handle all network specs in a loop as all specs can be handled in a similar way
|
||||
for _, specType := range []struct {
|
||||
length int
|
||||
getter func(i int) interface{}
|
||||
idBuilder func(spec interface{}) (resource.ID, error)
|
||||
getter func(i int) any
|
||||
idBuilder func(spec any) (resource.ID, error)
|
||||
resourceBuilder func(id string) resource.Resource
|
||||
resourceModifier func(newSpec interface{}) func(r resource.Resource) error
|
||||
resourceModifier func(newSpec any) func(r resource.Resource) error
|
||||
}{
|
||||
// AddressSpec
|
||||
{
|
||||
length: len(networkConfig.Addresses),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Addresses[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
addressSpec := spec.(network.AddressSpecSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
return network.LayeredID(network.ConfigPlatform, network.AddressID(addressSpec.LinkName, addressSpec.Address)), nil
|
||||
@ -236,7 +236,7 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewAddressSpec(network.ConfigNamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.AddressSpec).TypedSpec()
|
||||
|
||||
@ -250,10 +250,10 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// LinkSpec
|
||||
{
|
||||
length: len(networkConfig.Links),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Links[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
linkSpec := spec.(network.LinkSpecSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
return network.LayeredID(network.ConfigPlatform, network.LinkID(linkSpec.Name)), nil
|
||||
@ -261,7 +261,7 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewLinkSpec(network.ConfigNamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.LinkSpec).TypedSpec()
|
||||
|
||||
@ -275,10 +275,10 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// RouteSpec
|
||||
{
|
||||
length: len(networkConfig.Routes),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Routes[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
routeSpec := spec.(network.RouteSpecSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
return network.LayeredID(
|
||||
@ -289,7 +289,7 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewRouteSpec(network.ConfigNamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.RouteSpec).TypedSpec()
|
||||
|
||||
@ -303,16 +303,16 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// HostnameSpec
|
||||
{
|
||||
length: len(networkConfig.Hostnames),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Hostnames[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
return network.LayeredID(network.ConfigPlatform, network.HostnameID), nil
|
||||
},
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewHostnameSpec(network.ConfigNamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.HostnameSpec).TypedSpec()
|
||||
|
||||
@ -326,16 +326,16 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// ResolverSpec
|
||||
{
|
||||
length: len(networkConfig.Resolvers),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Resolvers[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
return network.LayeredID(network.ConfigPlatform, network.ResolverID), nil
|
||||
},
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewResolverSpec(network.ConfigNamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.ResolverSpec).TypedSpec()
|
||||
|
||||
@ -349,16 +349,16 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// TimeServerSpec
|
||||
{
|
||||
length: len(networkConfig.TimeServers),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.TimeServers[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
return network.LayeredID(network.ConfigPlatform, network.TimeServerID), nil
|
||||
},
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewTimeServerSpec(network.ConfigNamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.TimeServerSpec).TypedSpec()
|
||||
|
||||
@ -372,10 +372,10 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// OperatorSpec
|
||||
{
|
||||
length: len(networkConfig.Operators),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Operators[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
operatorSpec := spec.(network.OperatorSpecSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
return network.LayeredID(network.ConfigPlatform, network.OperatorID(operatorSpec.Operator, operatorSpec.LinkName)), nil
|
||||
@ -383,7 +383,7 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewOperatorSpec(network.ConfigNamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.OperatorSpec).TypedSpec()
|
||||
|
||||
@ -397,10 +397,10 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// ExternalIPs
|
||||
{
|
||||
length: len(networkConfig.ExternalIPs),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.ExternalIPs[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
ipAddr := spec.(netip.Addr) //nolint:errcheck,forcetypeassert
|
||||
ipPrefix := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
|
||||
|
||||
@ -409,7 +409,7 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewAddressStatus(network.NamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
ipAddr := newSpec.(netip.Addr) //nolint:errcheck,forcetypeassert
|
||||
ipPrefix := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
|
||||
@ -434,10 +434,10 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// ProbeSpec
|
||||
{
|
||||
length: len(networkConfig.Probes),
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Probes[i]
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
probeSpec := spec.(network.ProbeSpecSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
return probeSpec.ID()
|
||||
@ -445,7 +445,7 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return network.NewProbeSpec(network.NamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
spec := r.(*network.ProbeSpec).TypedSpec()
|
||||
|
||||
@ -459,16 +459,16 @@ func (ctrl *PlatformConfigController) apply(ctx context.Context, r controller.Ru
|
||||
// Platform metadata
|
||||
{
|
||||
length: metadataLength,
|
||||
getter: func(i int) interface{} {
|
||||
getter: func(i int) any {
|
||||
return networkConfig.Metadata
|
||||
},
|
||||
idBuilder: func(spec interface{}) (resource.ID, error) {
|
||||
idBuilder: func(spec any) (resource.ID, error) {
|
||||
return runtimeres.PlatformMetadataID, nil
|
||||
},
|
||||
resourceBuilder: func(id string) resource.Resource {
|
||||
return runtimeres.NewPlatformMetadataSpec(runtimeres.NamespaceName, id)
|
||||
},
|
||||
resourceModifier: func(newSpec interface{}) func(r resource.Resource) error {
|
||||
resourceModifier: func(newSpec any) func(r resource.Resource) error {
|
||||
return func(r resource.Resource) error {
|
||||
metadata := newSpec.(*runtimeres.PlatformMetadataSpec) //nolint:errcheck,forcetypeassert
|
||||
|
||||
|
||||
@ -178,7 +178,8 @@ func (ctrl *EventsSinkController) Run(ctx context.Context, r controller.Runtime,
|
||||
if watchCh == nil {
|
||||
watchCh = make(chan machinedruntime.EventInfo)
|
||||
|
||||
opts := []machinedruntime.WatchOptionFunc{}
|
||||
var opts []machinedruntime.WatchOptionFunc
|
||||
|
||||
if ctrl.eventID.IsNil() {
|
||||
opts = append(opts, machinedruntime.WithTailEvents(-1))
|
||||
} else {
|
||||
|
||||
@ -8,7 +8,6 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@ -38,7 +37,7 @@ func (mock *serviceMock) Load(services ...system.Service) []string {
|
||||
mock.mu.Lock()
|
||||
defer mock.mu.Unlock()
|
||||
|
||||
ids := []string{}
|
||||
ids := make([]string, 0, len(services))
|
||||
|
||||
for _, svc := range services {
|
||||
mock.services[svc.ID(nil)] = svc
|
||||
@ -90,13 +89,13 @@ func (mock *serviceMock) getIDs() []string {
|
||||
mock.mu.Lock()
|
||||
defer mock.mu.Unlock()
|
||||
|
||||
ids := []string{}
|
||||
ids := make([]string, 0, len(mock.services))
|
||||
|
||||
for id := range mock.services {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
sort.Strings(ids)
|
||||
slices.Sort(ids)
|
||||
|
||||
return ids
|
||||
}
|
||||
|
||||
@ -196,7 +196,7 @@ func (ctrl *KmsgLogDeliveryController) deliverLogs(ctx context.Context, r contro
|
||||
Msg: msg.Message.Message,
|
||||
Time: msg.Message.Timestamp,
|
||||
Level: kmsgPriorityToLevel(msg.Message.Priority),
|
||||
Fields: map[string]interface{}{
|
||||
Fields: map[string]any{
|
||||
"facility": msg.Message.Facility.String(),
|
||||
"seq": msg.Message.SequenceNumber,
|
||||
"clock": msg.Message.Clock,
|
||||
|
||||
@ -66,7 +66,7 @@ func (ctrl *KmsgLogConfigController) Run(ctx context.Context, r controller.Runti
|
||||
case <-r.EventCh():
|
||||
}
|
||||
|
||||
destinations := []*url.URL{}
|
||||
var destinations []*url.URL
|
||||
|
||||
if ctrl.Cmdline != nil {
|
||||
if val := ctrl.Cmdline.Get(constants.KernelParamLoggingKernel).First(); val != nil {
|
||||
|
||||
@ -35,7 +35,7 @@ type logHandler struct {
|
||||
}
|
||||
|
||||
// HandleLog implements logreceiver.Handler.
|
||||
func (s *logHandler) HandleLog(srcAddr netip.Addr, msg map[string]interface{}) {
|
||||
func (s *logHandler) HandleLog(srcAddr netip.Addr, msg map[string]any) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// LoadOrNewFromFile either loads value from file.yaml or generates new values and saves as file.yaml.
|
||||
func LoadOrNewFromFile(path string, empty interface{}, generate func(interface{}) error) error {
|
||||
func LoadOrNewFromFile(path string, empty any, generate func(any) error) error {
|
||||
f, err := os.OpenFile(path, os.O_RDONLY, 0)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("error reading state file: %w", err)
|
||||
|
||||
@ -53,7 +53,7 @@ func DefaultControllerOptions() LockOptions {
|
||||
type Controller interface {
|
||||
Runtime() Runtime
|
||||
Sequencer() Sequencer
|
||||
Run(context.Context, Sequence, interface{}, ...LockOption) error
|
||||
Run(context.Context, Sequence, any, ...LockOption) error
|
||||
V1Alpha2() V1Alpha2Controller
|
||||
}
|
||||
|
||||
|
||||
@ -66,7 +66,7 @@ type LogEvent struct {
|
||||
Msg string
|
||||
Time time.Time
|
||||
Level zapcore.Level
|
||||
Fields map[string]interface{}
|
||||
Fields map[string]any
|
||||
}
|
||||
|
||||
// ErrDontRetry indicates that log event should not be resent.
|
||||
|
||||
@ -74,7 +74,7 @@ func (manager *CircularBufferLoggingManager) ServiceLog(id string) runtime.LogHa
|
||||
return &circularHandler{
|
||||
manager: manager,
|
||||
id: id,
|
||||
fields: map[string]interface{}{
|
||||
fields: map[string]any{
|
||||
// use field name that is not used by anything else
|
||||
"talos-service": id,
|
||||
},
|
||||
@ -153,7 +153,7 @@ func (manager *CircularBufferLoggingManager) RegisteredLogs() []string {
|
||||
type circularHandler struct {
|
||||
manager *CircularBufferLoggingManager
|
||||
id string
|
||||
fields map[string]interface{}
|
||||
fields map[string]any
|
||||
|
||||
buf *circular.Buffer
|
||||
}
|
||||
|
||||
@ -102,7 +102,7 @@ func parseLogLine(l []byte, now time.Time) *runtime.LogEvent {
|
||||
return e
|
||||
}
|
||||
|
||||
func parseJSONLogLine(l []byte) (msg string, m map[string]interface{}) {
|
||||
func parseJSONLogLine(l []byte) (msg string, m map[string]any) {
|
||||
// the whole line is valid JSON
|
||||
if err := json.Unmarshal(l, &m); err == nil {
|
||||
return
|
||||
|
||||
@ -37,7 +37,7 @@ func TestParseLogLine(t *testing.T) {
|
||||
Msg: `reconfigured wireguard link`,
|
||||
Time: now,
|
||||
Level: zapcore.InfoLevel,
|
||||
Fields: map[string]interface{}{
|
||||
Fields: map[string]any{
|
||||
"component": "controller-runtime",
|
||||
"controller": "network.LinkSpecController",
|
||||
"link": "kubespan",
|
||||
@ -51,7 +51,7 @@ func TestParseLogLine(t *testing.T) {
|
||||
Msg: `finished scheduled compaction`,
|
||||
Time: time.Date(2021, 10, 19, 14, 53, 5, 815000000, time.UTC),
|
||||
Level: zapcore.InfoLevel,
|
||||
Fields: map[string]interface{}{
|
||||
Fields: map[string]any{
|
||||
"caller": "mvcc/kvstore_compaction.go:57",
|
||||
"compact-revision": float64(34567),
|
||||
"took": "21.041639ms",
|
||||
@ -64,7 +64,7 @@ func TestParseLogLine(t *testing.T) {
|
||||
Msg: `cleanup warnings time="2021-10-19T14:52:20Z" level=info msg="starting signal loop" namespace=k8s.io pid=2629`,
|
||||
Time: time.Date(2021, 10, 19, 14, 52, 20, 578858689, time.UTC),
|
||||
Level: zapcore.WarnLevel,
|
||||
Fields: map[string]interface{}{},
|
||||
Fields: map[string]any{},
|
||||
},
|
||||
},
|
||||
"kubelet": {
|
||||
@ -73,7 +73,7 @@ func TestParseLogLine(t *testing.T) {
|
||||
Msg: `RemoveContainer`,
|
||||
Time: time.Date(2021, 10, 26, 16, 46, 4, 792702913, time.UTC),
|
||||
Level: zapcore.InfoLevel,
|
||||
Fields: map[string]interface{}{
|
||||
Fields: map[string]any{
|
||||
"caller": "topologymanager/scope.go:110",
|
||||
"containerID": "0194fac91ac1d3949497f6912f3c7e73a062c3bf29b6d3da05557d4db2f8482b",
|
||||
"v": float64(0),
|
||||
@ -87,7 +87,7 @@ func TestParseLogLine(t *testing.T) {
|
||||
Msg: `Failed creating a mirror pod for: pods "kube-controller-manager-talos-dev-qemu-master-1" already exists`,
|
||||
Time: time.Date(2021, 10, 26, 16, 45, 51, 595943212, time.UTC),
|
||||
Level: zapcore.WarnLevel,
|
||||
Fields: map[string]interface{}{
|
||||
Fields: map[string]any{
|
||||
"caller": "kubelet/kubelet.go:1703",
|
||||
"pod": "kube-system/kube-controller-manager-talos-dev-qemu-master-1",
|
||||
},
|
||||
|
||||
@ -50,7 +50,7 @@ func (j *jsonLinesSender) tryLock(ctx context.Context) (unlock func()) {
|
||||
}
|
||||
|
||||
func (j *jsonLinesSender) marshalJSON(e *runtime.LogEvent) ([]byte, error) {
|
||||
m := make(map[string]interface{}, len(e.Fields)+3)
|
||||
m := make(map[string]any, len(e.Fields)+3)
|
||||
for k, v := range e.Fields {
|
||||
m[k] = v
|
||||
}
|
||||
|
||||
@ -56,7 +56,7 @@ type Meta interface {
|
||||
}
|
||||
|
||||
// ClusterState defines the cluster state.
|
||||
type ClusterState interface{}
|
||||
type ClusterState any
|
||||
|
||||
// V1Alpha2State defines the next generation (v2) interface binding into v1 runtime.
|
||||
type V1Alpha2State interface {
|
||||
|
||||
@ -62,7 +62,7 @@ func (a *AWS) ParseMetadata(metadata *MetadataConfig) (*runtime.PlatformNetworkC
|
||||
networkConfig.Hostnames = append(networkConfig.Hostnames, hostnameSpec)
|
||||
}
|
||||
|
||||
publicIPs := []string{}
|
||||
var publicIPs []string
|
||||
|
||||
if metadata.PublicIPv4 != "" {
|
||||
publicIPs = append(publicIPs, metadata.PublicIPv4)
|
||||
|
||||
@ -92,7 +92,7 @@ func (a *Azure) ParseMetadata(metadata *ComputeMetadata, interfaceAddresses []Ne
|
||||
networkConfig.Hostnames = append(networkConfig.Hostnames, hostnameSpec)
|
||||
}
|
||||
|
||||
publicIPs := []string{}
|
||||
var publicIPs []string
|
||||
|
||||
// external IP
|
||||
for _, iface := range interfaceAddresses {
|
||||
|
||||
@ -73,7 +73,7 @@ func (d *DigitalOcean) ParseMetadata(metadata *MetadataConfig) (*runtime.Platfor
|
||||
ConfigLayer: network.ConfigPlatform,
|
||||
})
|
||||
|
||||
publicIPs := []string{}
|
||||
var publicIPs []string
|
||||
|
||||
for _, iface := range metadata.Interfaces["public"] {
|
||||
if iface.IPv4 != nil {
|
||||
|
||||
@ -232,7 +232,7 @@ func (p *EquinixMetal) ParseMetadata(ctx context.Context, equinixMetadata *Metad
|
||||
|
||||
// 2. addresses
|
||||
|
||||
publicIPs := []string{}
|
||||
var publicIPs []string
|
||||
|
||||
for _, addr := range equinixMetadata.Network.Addresses {
|
||||
if !(addr.Enabled && addr.Management) {
|
||||
|
||||
@ -52,7 +52,7 @@ func (h *Hcloud) ParseMetadata(unmarshalledNetworkConfig *NetworkConfig, metadat
|
||||
networkConfig.Hostnames = append(networkConfig.Hostnames, hostnameSpec)
|
||||
}
|
||||
|
||||
publicIPs := []string{}
|
||||
var publicIPs []string
|
||||
|
||||
if metadata.PublicIPv4 != "" {
|
||||
publicIPs = append(publicIPs, metadata.PublicIPv4)
|
||||
|
||||
@ -53,7 +53,7 @@ func (s *Scaleway) ParseMetadata(metadata *instance.Metadata) (*runtime.Platform
|
||||
networkConfig.Hostnames = append(networkConfig.Hostnames, hostnameSpec)
|
||||
}
|
||||
|
||||
publicIPs := []string{}
|
||||
var publicIPs []string
|
||||
|
||||
if metadata.PublicIP.Address != "" {
|
||||
publicIPs = append(publicIPs, metadata.PublicIP.Address)
|
||||
|
||||
@ -52,7 +52,7 @@ func (v *Vultr) ParseMetadata(metadata *metadata.MetaData) (*runtime.PlatformNet
|
||||
networkConfig.Hostnames = append(networkConfig.Hostnames, hostnameSpec)
|
||||
}
|
||||
|
||||
publicIPs := []string{}
|
||||
var publicIPs []string
|
||||
|
||||
for i, addr := range metadata.Interfaces {
|
||||
iface := fmt.Sprintf("eth%d", i)
|
||||
|
||||
@ -104,7 +104,7 @@ func (c *Controller) setupLogging() error {
|
||||
|
||||
// Run executes all phases known to the controller in serial. `Controller`
|
||||
// aborts immediately if any phase fails.
|
||||
func (c *Controller) Run(ctx context.Context, seq runtime.Sequence, data interface{}, setters ...runtime.LockOption) error {
|
||||
func (c *Controller) Run(ctx context.Context, seq runtime.Sequence, data any, setters ...runtime.LockOption) error {
|
||||
// We must ensure that the runtime is configured since all sequences depend
|
||||
// on the runtime.
|
||||
if c.r == nil {
|
||||
@ -223,7 +223,7 @@ func (c *Controller) ListenForEvents(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Controller) run(ctx context.Context, seq runtime.Sequence, phases []runtime.Phase, data interface{}) error {
|
||||
func (c *Controller) run(ctx context.Context, seq runtime.Sequence, phases []runtime.Phase, data any) error {
|
||||
c.Runtime().Events().Publish(ctx, &machine.SequenceEvent{
|
||||
Sequence: seq.String(),
|
||||
Action: machine.SequenceEvent_START,
|
||||
@ -288,7 +288,7 @@ func (c *Controller) run(ctx context.Context, seq runtime.Sequence, phases []run
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) runPhase(ctx context.Context, phase runtime.Phase, seq runtime.Sequence, data interface{}) error {
|
||||
func (c *Controller) runPhase(ctx context.Context, phase runtime.Phase, seq runtime.Sequence, data any) error {
|
||||
c.Runtime().Events().Publish(ctx, &machine.PhaseEvent{
|
||||
Phase: phase.Name,
|
||||
Action: machine.PhaseEvent_START,
|
||||
@ -319,7 +319,7 @@ func (c *Controller) runPhase(ctx context.Context, phase runtime.Phase, seq runt
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
func (c *Controller) runTask(ctx context.Context, progress string, f runtime.TaskSetupFunc, seq runtime.Sequence, data interface{}) error {
|
||||
func (c *Controller) runTask(ctx context.Context, progress string, f runtime.TaskSetupFunc, seq runtime.Sequence, data any) error {
|
||||
task, taskName := f(seq, data)
|
||||
if task == nil {
|
||||
return nil
|
||||
@ -359,7 +359,7 @@ func (c *Controller) runTask(ctx context.Context, progress string, f runtime.Tas
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func (c *Controller) phases(seq runtime.Sequence, data interface{}) ([]runtime.Phase, error) {
|
||||
func (c *Controller) phases(seq runtime.Sequence, data any) ([]runtime.Phase, error) {
|
||||
var phases []runtime.Phase
|
||||
|
||||
switch seq {
|
||||
|
||||
@ -67,8 +67,8 @@ func (m *mockSequencer) Upgrade(r runtime.Runtime, req *machine.UpgradeRequest)
|
||||
return m.phases[runtime.SequenceUpgrade]
|
||||
}
|
||||
|
||||
func (m *mockSequencer) trackCall(name string, doneCh chan struct{}) func(runtime.Sequence, interface{}) (runtime.TaskExecutionFunc, string) {
|
||||
return func(seq runtime.Sequence, data interface{}) (runtime.TaskExecutionFunc, string) {
|
||||
func (m *mockSequencer) trackCall(name string, doneCh chan struct{}) func(runtime.Sequence, any) (runtime.TaskExecutionFunc, string) {
|
||||
return func(seq runtime.Sequence, data any) (runtime.TaskExecutionFunc, string) {
|
||||
return func(ctx context.Context, logger *log.Logger, r runtime.Runtime) error {
|
||||
if doneCh != nil {
|
||||
defer func() {
|
||||
@ -97,8 +97,8 @@ func TestRun(t *testing.T) {
|
||||
from runtime.Sequence
|
||||
to runtime.Sequence
|
||||
expectError error
|
||||
dataFrom interface{}
|
||||
dataTo interface{}
|
||||
dataFrom any
|
||||
dataTo any
|
||||
}{
|
||||
{
|
||||
name: "reboot should take over boot",
|
||||
@ -202,7 +202,7 @@ func TestRun(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func wait(seq runtime.Sequence, data interface{}) (runtime.TaskExecutionFunc, string) {
|
||||
func wait(seq runtime.Sequence, data any) (runtime.TaskExecutionFunc, string) {
|
||||
return func(ctx context.Context, logger *log.Logger, r runtime.Runtime) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
||||
@ -642,7 +642,7 @@ func StartSyslogd(r runtime.Sequence, _ any) (runtime.TaskExecutionFunc, string)
|
||||
}
|
||||
|
||||
// StartDashboard represents the task to start dashboard.
|
||||
func StartDashboard(_ runtime.Sequence, _ interface{}) (runtime.TaskExecutionFunc, string) {
|
||||
func StartDashboard(_ runtime.Sequence, _ any) (runtime.TaskExecutionFunc, string) {
|
||||
return func(_ context.Context, _ *log.Logger, r runtime.Runtime) error {
|
||||
system.Services(r).LoadAndStart(&services.Dashboard{})
|
||||
|
||||
@ -726,7 +726,7 @@ func StartAllServices(runtime.Sequence, any) (runtime.TaskExecutionFunc, string)
|
||||
|
||||
svcs.LoadAndStart(serviceList...)
|
||||
|
||||
all := []conditions.Condition{}
|
||||
var all []conditions.Condition
|
||||
|
||||
logger.Printf("waiting for %d services", len(svcs.List()))
|
||||
|
||||
|
||||
@ -141,8 +141,8 @@ func (events *ServiceEvents) AsProto(count int) *machineapi.ServiceEvents {
|
||||
}
|
||||
|
||||
// Recorder adds new event to the history of events, formatting message with args using Sprintf.
|
||||
type Recorder func(newstate ServiceState, message string, args ...interface{})
|
||||
type Recorder func(newstate ServiceState, message string, args ...any)
|
||||
|
||||
// NullRecorder discards events.
|
||||
func NullRecorder(newstate ServiceState, message string, args ...interface{}) {
|
||||
func NullRecorder(newstate ServiceState, message string, args ...any) {
|
||||
}
|
||||
|
||||
@ -65,7 +65,8 @@ func (suite *EventsSuite) TestOverflow() {
|
||||
suite.Assert().Equal([]events.ServiceEvent(nil), e.Get(0))
|
||||
suite.assertEvents([]string{strconv.Itoa(numEvents - 1)}, e.Get(1))
|
||||
|
||||
expected := []string{}
|
||||
var expected []string
|
||||
|
||||
for i := numEvents - events.MaxEventsToKeep; i < numEvents; i++ {
|
||||
expected = append(expected, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
@ -268,7 +268,7 @@ func (c *containerdRunner) newContainerOpts(
|
||||
image containerd.Image,
|
||||
specOpts []oci.SpecOpts,
|
||||
) []containerd.NewContainerOpts {
|
||||
containerOpts := []containerd.NewContainerOpts{}
|
||||
var containerOpts []containerd.NewContainerOpts
|
||||
|
||||
if image != nil {
|
||||
containerOpts = append(
|
||||
@ -292,7 +292,7 @@ func (c *containerdRunner) newContainerOpts(
|
||||
}
|
||||
|
||||
func (c *containerdRunner) newOCISpecOpts(image oci.Image) []oci.SpecOpts {
|
||||
specOpts := []oci.SpecOpts{}
|
||||
var specOpts []oci.SpecOpts
|
||||
|
||||
if image != nil {
|
||||
specOpts = append(
|
||||
|
||||
@ -41,7 +41,7 @@ const (
|
||||
busyboxImage = "docker.io/library/busybox:latest"
|
||||
)
|
||||
|
||||
func MockEventSink(state events.ServiceState, message string, args ...interface{}) {
|
||||
func MockEventSink(state events.ServiceState, message string, args ...any) {
|
||||
log.Printf("state %s: %s", state, fmt.Sprintf(message, args...))
|
||||
}
|
||||
|
||||
|
||||
@ -27,7 +27,7 @@ import (
|
||||
v1alpha1cfg "github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
|
||||
)
|
||||
|
||||
func MockEventSink(state events.ServiceState, message string, args ...interface{}) {
|
||||
func MockEventSink(state events.ServiceState, message string, args ...any) {
|
||||
log.Printf("state %s: %s", state, fmt.Sprintf(message, args...))
|
||||
}
|
||||
|
||||
|
||||
@ -25,7 +25,7 @@ import (
|
||||
"github.com/siderolabs/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
)
|
||||
|
||||
func MockEventSink(state events.ServiceState, message string, args ...interface{}) {
|
||||
func MockEventSink(state events.ServiceState, message string, args ...any) {
|
||||
log.Printf("state %s: %s", state, fmt.Sprintf(message, args...))
|
||||
}
|
||||
|
||||
|
||||
@ -69,7 +69,7 @@ func (m *MockRunner) String() string {
|
||||
return "MockRunner()"
|
||||
}
|
||||
|
||||
func MockEventSink(state events.ServiceState, message string, args ...interface{}) {
|
||||
func MockEventSink(state events.ServiceState, message string, args ...any) {
|
||||
log.Printf("state %s: %s", state, fmt.Sprintf(message, args...))
|
||||
}
|
||||
|
||||
|
||||
@ -70,7 +70,7 @@ func (svcrunner *ServiceRunner) GetState() events.ServiceState {
|
||||
}
|
||||
|
||||
// UpdateState implements events.Recorder.
|
||||
func (svcrunner *ServiceRunner) UpdateState(ctx context.Context, newstate events.ServiceState, message string, args ...interface{}) {
|
||||
func (svcrunner *ServiceRunner) UpdateState(ctx context.Context, newstate events.ServiceState, message string, args ...any) {
|
||||
svcrunner.mu.Lock()
|
||||
|
||||
event := events.ServiceEvent{
|
||||
@ -280,7 +280,7 @@ func (svcrunner *ServiceRunner) run(ctx context.Context, runnr runner.Runner) er
|
||||
errCh := make(chan error)
|
||||
|
||||
go func() {
|
||||
errCh <- runnr.Run(func(s events.ServiceState, msg string, args ...interface{}) {
|
||||
errCh <- runnr.Run(func(s events.ServiceState, msg string, args ...any) {
|
||||
svcrunner.UpdateState(ctx, s, msg, args...)
|
||||
})
|
||||
}()
|
||||
|
||||
@ -22,7 +22,7 @@ type ServiceRunnerSuite struct {
|
||||
}
|
||||
|
||||
func (suite *ServiceRunnerSuite) assertStateSequence(expectedStates []events.ServiceState, sr *system.ServiceRunner) {
|
||||
states := []events.ServiceState{}
|
||||
states := make([]events.ServiceState, 0, 1000)
|
||||
|
||||
for _, event := range sr.GetEventHistory(1000) {
|
||||
states = append(states, event.State)
|
||||
|
||||
@ -350,7 +350,7 @@ func buildInitialCluster(ctx context.Context, r runtime.Runtime, name string, pe
|
||||
return retry.ExpectedError(err)
|
||||
}
|
||||
|
||||
conf := []string{}
|
||||
var conf []string
|
||||
|
||||
for _, memb := range resp.Members {
|
||||
for _, u := range memb.PeerURLs {
|
||||
|
||||
@ -69,7 +69,7 @@ func (svc *Extension) PostFunc(r runtime.Runtime, state events.ServiceState) (er
|
||||
|
||||
// Condition implements the Service interface.
|
||||
func (svc *Extension) Condition(r runtime.Runtime) conditions.Condition {
|
||||
conds := []conditions.Condition{}
|
||||
var conds []conditions.Condition
|
||||
|
||||
if svc.Spec.Container.EnvironmentFile != "" {
|
||||
// add a dependency on the environment file
|
||||
|
||||
@ -57,7 +57,7 @@ func (mr *MockSnapshotterMockRecorder) Close() *gomock.Call {
|
||||
// Commit mocks base method.
|
||||
func (m *MockSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []interface{}{ctx, name, key}
|
||||
varargs := []any{ctx, name, key}
|
||||
for _, a := range opts {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
@ -67,9 +67,9 @@ func (m *MockSnapshotter) Commit(ctx context.Context, name, key string, opts ...
|
||||
}
|
||||
|
||||
// Commit indicates an expected call of Commit.
|
||||
func (mr *MockSnapshotterMockRecorder) Commit(ctx, name, key interface{}, opts ...interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Commit(ctx, name, key any, opts ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]interface{}{ctx, name, key}, opts...)
|
||||
varargs := append([]any{ctx, name, key}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockSnapshotter)(nil).Commit), varargs...)
|
||||
}
|
||||
|
||||
@ -83,7 +83,7 @@ func (m *MockSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount
|
||||
}
|
||||
|
||||
// Mounts indicates an expected call of Mounts.
|
||||
func (mr *MockSnapshotterMockRecorder) Mounts(ctx, key interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Mounts(ctx, key any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mounts", reflect.TypeOf((*MockSnapshotter)(nil).Mounts), ctx, key)
|
||||
}
|
||||
@ -91,7 +91,7 @@ func (mr *MockSnapshotterMockRecorder) Mounts(ctx, key interface{}) *gomock.Call
|
||||
// Prepare mocks base method.
|
||||
func (m *MockSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []interface{}{ctx, key, parent}
|
||||
varargs := []any{ctx, key, parent}
|
||||
for _, a := range opts {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
@ -102,9 +102,9 @@ func (m *MockSnapshotter) Prepare(ctx context.Context, key, parent string, opts
|
||||
}
|
||||
|
||||
// Prepare indicates an expected call of Prepare.
|
||||
func (mr *MockSnapshotterMockRecorder) Prepare(ctx, key, parent interface{}, opts ...interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Prepare(ctx, key, parent any, opts ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]interface{}{ctx, key, parent}, opts...)
|
||||
varargs := append([]any{ctx, key, parent}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prepare", reflect.TypeOf((*MockSnapshotter)(nil).Prepare), varargs...)
|
||||
}
|
||||
|
||||
@ -117,7 +117,7 @@ func (m *MockSnapshotter) Remove(ctx context.Context, key string) error {
|
||||
}
|
||||
|
||||
// Remove indicates an expected call of Remove.
|
||||
func (mr *MockSnapshotterMockRecorder) Remove(ctx, key interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Remove(ctx, key any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockSnapshotter)(nil).Remove), ctx, key)
|
||||
}
|
||||
@ -132,7 +132,7 @@ func (m *MockSnapshotter) Stat(ctx context.Context, key string) (snapshots.Info,
|
||||
}
|
||||
|
||||
// Stat indicates an expected call of Stat.
|
||||
func (mr *MockSnapshotterMockRecorder) Stat(ctx, key interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Stat(ctx, key any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockSnapshotter)(nil).Stat), ctx, key)
|
||||
}
|
||||
@ -140,7 +140,7 @@ func (mr *MockSnapshotterMockRecorder) Stat(ctx, key interface{}) *gomock.Call {
|
||||
// Update mocks base method.
|
||||
func (m *MockSnapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []interface{}{ctx, info}
|
||||
varargs := []any{ctx, info}
|
||||
for _, a := range fieldpaths {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
@ -151,9 +151,9 @@ func (m *MockSnapshotter) Update(ctx context.Context, info snapshots.Info, field
|
||||
}
|
||||
|
||||
// Update indicates an expected call of Update.
|
||||
func (mr *MockSnapshotterMockRecorder) Update(ctx, info interface{}, fieldpaths ...interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Update(ctx, info any, fieldpaths ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]interface{}{ctx, info}, fieldpaths...)
|
||||
varargs := append([]any{ctx, info}, fieldpaths...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockSnapshotter)(nil).Update), varargs...)
|
||||
}
|
||||
|
||||
@ -167,7 +167,7 @@ func (m *MockSnapshotter) Usage(ctx context.Context, key string) (snapshots.Usag
|
||||
}
|
||||
|
||||
// Usage indicates an expected call of Usage.
|
||||
func (mr *MockSnapshotterMockRecorder) Usage(ctx, key interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Usage(ctx, key any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Usage", reflect.TypeOf((*MockSnapshotter)(nil).Usage), ctx, key)
|
||||
}
|
||||
@ -175,7 +175,7 @@ func (mr *MockSnapshotterMockRecorder) Usage(ctx, key interface{}) *gomock.Call
|
||||
// View mocks base method.
|
||||
func (m *MockSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []interface{}{ctx, key, parent}
|
||||
varargs := []any{ctx, key, parent}
|
||||
for _, a := range opts {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
@ -186,16 +186,16 @@ func (m *MockSnapshotter) View(ctx context.Context, key, parent string, opts ...
|
||||
}
|
||||
|
||||
// View indicates an expected call of View.
|
||||
func (mr *MockSnapshotterMockRecorder) View(ctx, key, parent interface{}, opts ...interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) View(ctx, key, parent any, opts ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]interface{}{ctx, key, parent}, opts...)
|
||||
varargs := append([]any{ctx, key, parent}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "View", reflect.TypeOf((*MockSnapshotter)(nil).View), varargs...)
|
||||
}
|
||||
|
||||
// Walk mocks base method.
|
||||
func (m *MockSnapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, filters ...string) error {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []interface{}{ctx, fn}
|
||||
varargs := []any{ctx, fn}
|
||||
for _, a := range filters {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
@ -205,9 +205,9 @@ func (m *MockSnapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, filte
|
||||
}
|
||||
|
||||
// Walk indicates an expected call of Walk.
|
||||
func (mr *MockSnapshotterMockRecorder) Walk(ctx, fn interface{}, filters ...interface{}) *gomock.Call {
|
||||
func (mr *MockSnapshotterMockRecorder) Walk(ctx, fn any, filters ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]interface{}{ctx, fn}, filters...)
|
||||
varargs := append([]any{ctx, fn}, filters...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Walk", reflect.TypeOf((*MockSnapshotter)(nil).Walk), varargs...)
|
||||
}
|
||||
|
||||
@ -243,7 +243,7 @@ func (m *MockCleaner) Cleanup(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Cleanup indicates an expected call of Cleanup.
|
||||
func (mr *MockCleanerMockRecorder) Cleanup(ctx interface{}) *gomock.Call {
|
||||
func (mr *MockCleanerMockRecorder) Cleanup(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockCleaner)(nil).Cleanup), ctx)
|
||||
}
|
||||
|
||||
@ -64,7 +64,7 @@ func TestParsing(t *testing.T) {
|
||||
|
||||
select {
|
||||
case msg := <-ch.ch:
|
||||
var parsed map[string]interface{}
|
||||
var parsed map[string]any
|
||||
|
||||
require.NoError(t, json.Unmarshal(msg, &parsed))
|
||||
|
||||
|
||||
@ -216,13 +216,13 @@ func (suite *ServiceAccountSuite) getCRD() (*unstructured.Unstructured, error) {
|
||||
|
||||
func (suite *ServiceAccountSuite) createServiceAccount(ns string, name string, roles []string) (*unstructured.Unstructured, error) {
|
||||
return suite.DynamicClient.Resource(serviceAccountGVR).Namespace(ns).Create(suite.ctx, &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
Object: map[string]any{
|
||||
"apiVersion": fmt.Sprintf("%s/%s", constants.ServiceAccountResourceGroup, constants.ServiceAccountResourceVersion),
|
||||
"kind": constants.ServiceAccountResourceKind,
|
||||
"metadata": map[string]interface{}{
|
||||
"metadata": map[string]any{
|
||||
"name": name,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"spec": map[string]any{
|
||||
"roles": roles,
|
||||
},
|
||||
},
|
||||
|
||||
@ -32,7 +32,7 @@ type duInfo struct {
|
||||
}
|
||||
|
||||
func splitLine(line string) []string {
|
||||
columns := []string{}
|
||||
var columns []string
|
||||
|
||||
parts := strings.Split(line, " ")
|
||||
for _, part := range parts {
|
||||
|
||||
@ -132,7 +132,7 @@ func (suite *GenSuite) TestGenConfigURLValidation() {
|
||||
|
||||
// TestGenConfigPatchJSON6902 verifies that gen config --config-patch works with JSON patches.
|
||||
func (suite *GenSuite) TestGenConfigPatchJSON6902() {
|
||||
patch, err := json.Marshal([]map[string]interface{}{
|
||||
patch, err := json.Marshal([]map[string]any{
|
||||
{
|
||||
"op": "replace",
|
||||
"path": "/cluster/clusterName",
|
||||
@ -147,8 +147,8 @@ func (suite *GenSuite) TestGenConfigPatchJSON6902() {
|
||||
|
||||
// TestGenConfigPatchStrategic verifies that gen config --config-patch works with strategic merge patches.
|
||||
func (suite *GenSuite) TestGenConfigPatchStrategic() {
|
||||
patch, err := yaml.Marshal(map[string]interface{}{
|
||||
"cluster": map[string]interface{}{
|
||||
patch, err := yaml.Marshal(map[string]any{
|
||||
"cluster": map[string]any{
|
||||
"clusterName": "bar",
|
||||
},
|
||||
})
|
||||
|
||||
@ -67,7 +67,7 @@ func (suite *MachineConfigSuite) TestGen() {
|
||||
// TestPatchPrintStdout tests the patch subcommand with output set to stdout
|
||||
// with multiple patches from the command line and from file.
|
||||
func (suite *MachineConfigSuite) TestPatchPrintStdout() {
|
||||
patch1, err := json.Marshal([]map[string]interface{}{
|
||||
patch1, err := json.Marshal([]map[string]any{
|
||||
{
|
||||
"op": "replace",
|
||||
"path": "/cluster/clusterName",
|
||||
@ -76,7 +76,7 @@ func (suite *MachineConfigSuite) TestPatchPrintStdout() {
|
||||
})
|
||||
suite.Require().NoError(err)
|
||||
|
||||
patch2, err := json.Marshal([]map[string]interface{}{
|
||||
patch2, err := json.Marshal([]map[string]any{
|
||||
{
|
||||
"op": "replace",
|
||||
"path": "/cluster/controlPlane/endpoint",
|
||||
@ -121,7 +121,7 @@ func (suite *MachineConfigSuite) TestPatchPrintStdout() {
|
||||
|
||||
// TestPatchWriteToFile tests the patch subcommand with output set to a file.
|
||||
func (suite *MachineConfigSuite) TestPatchWriteToFile() {
|
||||
patch1, err := json.Marshal([]map[string]interface{}{
|
||||
patch1, err := json.Marshal([]map[string]any{
|
||||
{
|
||||
"op": "replace",
|
||||
"path": "/cluster/clusterName",
|
||||
|
||||
@ -54,11 +54,11 @@ func (suite *PatchSuite) TestSuccess() {
|
||||
func (suite *PatchSuite) TestError() {
|
||||
node := suite.RandomDiscoveredNodeInternalIP(machine.TypeControlPlane)
|
||||
|
||||
patch := []map[string]interface{}{
|
||||
patch := []map[string]any{
|
||||
{
|
||||
"op": "crash",
|
||||
"path": "/cluster/proxy",
|
||||
"value": map[string]interface{}{
|
||||
"value": map[string]any{
|
||||
"image": fmt.Sprintf("%s:v%s", constants.KubeProxyImage, constants.DefaultKubernetesVersion),
|
||||
},
|
||||
},
|
||||
|
||||
@ -40,7 +40,7 @@ func Generate(ctx context.Context, in *machine.GenerateConfigurationRequest) (re
|
||||
case "v1alpha1":
|
||||
machineType := v1alpha1machine.Type(in.MachineConfig.Type)
|
||||
|
||||
options := []generate.Option{}
|
||||
var options []generate.Option
|
||||
|
||||
if in.MachineConfig.NetworkConfig != nil {
|
||||
networkConfig := &v1alpha1.NetworkConfig{
|
||||
|
||||
@ -89,7 +89,8 @@ func (c *Container) GetLogChunker(ctx context.Context, follow bool, tailLines in
|
||||
}
|
||||
}
|
||||
|
||||
chunkerOptions := []file.Option{}
|
||||
var chunkerOptions []file.Option
|
||||
|
||||
if follow {
|
||||
chunkerOptions = append(chunkerOptions, file.WithFollow())
|
||||
}
|
||||
|
||||
@ -37,7 +37,7 @@ const (
|
||||
busyboxImageDigest = "sha256:4b6ad3a68d34da29bf7c8ccb5d355ba8b4babcad1f99798204e7abb43e54ee3d"
|
||||
)
|
||||
|
||||
func MockEventSink(state events.ServiceState, message string, args ...interface{}) {
|
||||
func MockEventSink(state events.ServiceState, message string, args ...any) {
|
||||
}
|
||||
|
||||
//nolint:maligned
|
||||
@ -163,7 +163,7 @@ func (suite *ContainerdSuite) run(runners ...runner.Runner) {
|
||||
suite.containersWg.Add(1)
|
||||
|
||||
go func(r runner.Runner) {
|
||||
runningSink := func(state events.ServiceState, message string, args ...interface{}) {
|
||||
runningSink := func(state events.ServiceState, message string, args ...any) {
|
||||
if state == events.StateRunning {
|
||||
runningCh <- true
|
||||
}
|
||||
|
||||
@ -271,7 +271,7 @@ func (i *inspector) buildContainer(container *runtimeapi.Container) (*ctrs.Conta
|
||||
}
|
||||
|
||||
if info, ok := containerInfo["info"]; ok {
|
||||
var verboseInfo map[string]interface{}
|
||||
var verboseInfo map[string]any
|
||||
|
||||
if err := json.Unmarshal([]byte(info), &verboseInfo); err == nil {
|
||||
if pid, ok := verboseInfo["pid"]; ok {
|
||||
|
||||
@ -37,7 +37,7 @@ const (
|
||||
// pauseImageDigest = "sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e".
|
||||
)
|
||||
|
||||
func MockEventSink(state events.ServiceState, message string, args ...interface{}) {
|
||||
func MockEventSink(state events.ServiceState, message string, args ...any) {
|
||||
}
|
||||
|
||||
type CRISuite struct {
|
||||
|
||||
@ -31,8 +31,8 @@ const (
|
||||
busyboxImage = "docker.io/library/busybox:1.30.1"
|
||||
)
|
||||
|
||||
func MockEventSink(t *testing.T) func(state events.ServiceState, message string, args ...interface{}) {
|
||||
return func(state events.ServiceState, message string, args ...interface{}) {
|
||||
func MockEventSink(t *testing.T) func(state events.ServiceState, message string, args ...any) {
|
||||
return func(state events.ServiceState, message string, args ...any) {
|
||||
t.Logf(message, args...)
|
||||
}
|
||||
}
|
||||
|
||||
@ -269,7 +269,7 @@ func (r *Kubernetes) Watch(ctx context.Context, logger *zap.Logger) (<-chan stru
|
||||
|
||||
notifyCh := make(chan struct{}, 1)
|
||||
|
||||
notify := func(_ interface{}) {
|
||||
notify := func(_ any) {
|
||||
select {
|
||||
case notifyCh <- struct{}{}:
|
||||
default:
|
||||
@ -287,7 +287,7 @@ func (r *Kubernetes) Watch(ctx context.Context, logger *zap.Logger) (<-chan stru
|
||||
if _, err := r.nodes.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: notify,
|
||||
DeleteFunc: notify,
|
||||
UpdateFunc: func(_, _ interface{}) { notify(nil) },
|
||||
UpdateFunc: func(_, _ any) { notify(nil) },
|
||||
}); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to add event handler: %w", err)
|
||||
}
|
||||
|
||||
@ -44,7 +44,7 @@ func NewHandler(device *blockdevice.BlockDevice, partition *gpt.Partition, encry
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts := []luks.Option{}
|
||||
var opts []luks.Option
|
||||
if encryptionConfig.KeySize() != 0 {
|
||||
opts = append(opts, luks.WithKeySize(encryptionConfig.KeySize()))
|
||||
}
|
||||
|
||||
@ -104,7 +104,7 @@ func SystemMountPointForLabel(ctx context.Context, device *blockdevice.BlockDevi
|
||||
|
||||
o := NewDefaultOptions(opts...)
|
||||
|
||||
preMountHooks := []Hook{}
|
||||
var preMountHooks []Hook
|
||||
|
||||
if o.Encryption != nil {
|
||||
encryptionHandler, err := encryption.NewHandler(
|
||||
|
||||
@ -56,7 +56,7 @@ func (builder *Builder) assemble() error {
|
||||
}
|
||||
|
||||
// create the output file
|
||||
args := []string{}
|
||||
args := make([]string, 0, len(builder.sections)+2)
|
||||
|
||||
for _, section := range builder.sections {
|
||||
if !section.Append {
|
||||
|
||||
@ -44,20 +44,20 @@ func NewSeparator(description string) *Item {
|
||||
type Item struct {
|
||||
Name string
|
||||
description string
|
||||
dest interface{}
|
||||
options []interface{}
|
||||
dest any
|
||||
options []any
|
||||
}
|
||||
|
||||
// TableHeaders represents table headers list for item options which are using table representation.
|
||||
type TableHeaders []interface{}
|
||||
type TableHeaders []any
|
||||
|
||||
// NewTableHeaders creates TableHeaders object.
|
||||
func NewTableHeaders(headers ...interface{}) TableHeaders {
|
||||
func NewTableHeaders(headers ...any) TableHeaders {
|
||||
return TableHeaders(headers)
|
||||
}
|
||||
|
||||
// NewItem creates new form item.
|
||||
func NewItem(name, description string, dest interface{}, options ...interface{}) *Item {
|
||||
func NewItem(name, description string, dest any, options ...any) *Item {
|
||||
return &Item{
|
||||
Name: name,
|
||||
dest: dest,
|
||||
@ -75,7 +75,7 @@ func (item *Item) assign(value string) error {
|
||||
//
|
||||
//nolint:gocyclo,cyclop
|
||||
func (item *Item) createFormItems() ([]tview.Primitive, error) {
|
||||
res := []tview.Primitive{}
|
||||
var res []tview.Primitive
|
||||
|
||||
v := reflect.ValueOf(item.dest)
|
||||
if v.Kind() == reflect.Ptr {
|
||||
|
||||
@ -22,7 +22,7 @@ func NewTable() *Table {
|
||||
Table: tview.NewTable(),
|
||||
selectedRow: -1,
|
||||
hoveredRow: -1,
|
||||
rows: [][]interface{}{},
|
||||
rows: [][]any{},
|
||||
}
|
||||
|
||||
hasFocus := false
|
||||
@ -77,16 +77,16 @@ type Table struct {
|
||||
selectedRow int
|
||||
hoveredRow int
|
||||
onRowSelected func(row int)
|
||||
rows [][]interface{}
|
||||
rows [][]any
|
||||
}
|
||||
|
||||
// SetHeader sets table header.
|
||||
func (t *Table) SetHeader(keys ...interface{}) {
|
||||
func (t *Table) SetHeader(keys ...any) {
|
||||
t.AddRow(keys...)
|
||||
}
|
||||
|
||||
// AddRow adds a new row to the table.
|
||||
func (t *Table) AddRow(columns ...interface{}) {
|
||||
func (t *Table) AddRow(columns ...any) {
|
||||
row := t.GetRowCount()
|
||||
col := backgroundColor
|
||||
textColor := tview.Styles.PrimaryTextColor
|
||||
@ -201,7 +201,7 @@ func (t *Table) SetRowSelectedFunc(callback func(row int)) {
|
||||
}
|
||||
|
||||
// GetValue returns value in row/column.
|
||||
func (t *Table) GetValue(row, column int) interface{} {
|
||||
func (t *Table) GetValue(row, column int) any {
|
||||
if row < len(t.rows) && column < len(t.rows[row]) {
|
||||
return t.rows[row][column]
|
||||
}
|
||||
|
||||
@ -176,7 +176,8 @@ func (installer *Installer) configure() error {
|
||||
)
|
||||
|
||||
currentPage := 0
|
||||
menuButtons := []*components.MenuButton{}
|
||||
|
||||
var menuButtons []*components.MenuButton
|
||||
|
||||
done := make(chan struct{})
|
||||
state := installer.state
|
||||
|
||||
@ -62,7 +62,7 @@ func NewState(ctx context.Context, installer *Installer, conn *Connection) (*Sta
|
||||
opts.ClusterConfig.ControlPlane.Endpoint = fmt.Sprintf("https://%s", nethelpers.JoinHostPort(conn.nodeEndpoint, constants.DefaultControlPlanePort))
|
||||
}
|
||||
|
||||
installDiskOptions := []interface{}{
|
||||
installDiskOptions := []any{
|
||||
components.NewTableHeaders("DEVICE NAME", "MODEL NAME", "SIZE"),
|
||||
}
|
||||
|
||||
@ -81,16 +81,16 @@ func NewState(ctx context.Context, installer *Installer, conn *Connection) (*Sta
|
||||
}
|
||||
}
|
||||
|
||||
var machineTypes []interface{}
|
||||
var machineTypes []any
|
||||
|
||||
if conn.ExpandingCluster() {
|
||||
machineTypes = []interface{}{
|
||||
machineTypes = []any{
|
||||
" worker ", machineapi.MachineConfig_MachineType(machine.TypeWorker),
|
||||
" control plane ", machineapi.MachineConfig_MachineType(machine.TypeControlPlane),
|
||||
}
|
||||
opts.MachineConfig.Type = machineapi.MachineConfig_MachineType(machine.TypeControlPlane)
|
||||
} else {
|
||||
machineTypes = []interface{}{
|
||||
machineTypes = []any{
|
||||
" control plane ", machineapi.MachineConfig_MachineType(machine.TypeInit),
|
||||
}
|
||||
}
|
||||
|
||||
@ -94,7 +94,7 @@ func (a Args) Args() []string {
|
||||
keys := maps.Keys(a)
|
||||
sort.Strings(keys)
|
||||
|
||||
args := []string{}
|
||||
args := make([]string, 0, len(a))
|
||||
|
||||
for _, key := range keys {
|
||||
args = append(args, fmt.Sprintf("--%s=%s", key, a[key]))
|
||||
|
||||
@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
// Fatalf prints formatted message to stderr and aborts execution.
|
||||
func Fatalf(message string, args ...interface{}) {
|
||||
func Fatalf(message string, args ...any) {
|
||||
if !strings.HasSuffix(message, "\n") {
|
||||
message += "\n"
|
||||
}
|
||||
@ -21,7 +21,7 @@ func Fatalf(message string, args ...interface{}) {
|
||||
}
|
||||
|
||||
// Warning prints formatted message to stderr.
|
||||
func Warning(message string, args ...interface{}) {
|
||||
func Warning(message string, args ...any) {
|
||||
if !strings.HasSuffix(message, "\n") {
|
||||
message += "\n"
|
||||
}
|
||||
|
||||
@ -469,9 +469,9 @@ func checkPodStatus(ctx context.Context, cluster UpgradeProvider, options Upgrad
|
||||
}
|
||||
|
||||
if _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) { channel.SendWithContext(ctx, notifyCh, obj.(*v1.Pod)) },
|
||||
DeleteFunc: func(_ interface{}) {},
|
||||
UpdateFunc: func(_, obj interface{}) { channel.SendWithContext(ctx, notifyCh, obj.(*v1.Pod)) },
|
||||
AddFunc: func(obj any) { channel.SendWithContext(ctx, notifyCh, obj.(*v1.Pod)) },
|
||||
DeleteFunc: func(_ any) {},
|
||||
UpdateFunc: func(_, obj any) { channel.SendWithContext(ctx, notifyCh, obj.(*v1.Pod)) },
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error adding watch event handler: %w", err)
|
||||
}
|
||||
|
||||
@ -43,7 +43,7 @@ type UpgradeOptions struct {
|
||||
}
|
||||
|
||||
// Log writes the line to logger or to stdout if no logger was provided.
|
||||
func (options *UpgradeOptions) Log(line string, args ...interface{}) {
|
||||
func (options *UpgradeOptions) Log(line string, args ...any) {
|
||||
if options.LogOutput != nil {
|
||||
options.LogOutput.Write([]byte(fmt.Sprintf(line, args...))) //nolint:errcheck
|
||||
|
||||
|
||||
@ -31,7 +31,7 @@ type gogoMessage interface {
|
||||
type Codec struct{}
|
||||
|
||||
// Marshal implements encoding.Codec.
|
||||
func (Codec) Marshal(v interface{}) ([]byte, error) {
|
||||
func (Codec) Marshal(v any) ([]byte, error) {
|
||||
// some third-party types (like from etcd and containerd) implement gogoMessage
|
||||
if gm, ok := v.(gogoMessage); ok {
|
||||
return gm.Marshal()
|
||||
@ -48,7 +48,7 @@ func (Codec) Marshal(v interface{}) ([]byte, error) {
|
||||
}
|
||||
|
||||
// Unmarshal implements encoding.Codec.
|
||||
func (Codec) Unmarshal(data []byte, v interface{}) error {
|
||||
func (Codec) Unmarshal(data []byte, v any) error {
|
||||
// some third-party types (like from etcd and containerd) implement gogoMessage
|
||||
if gm, ok := v.(gogoMessage); ok {
|
||||
return gm.Unmarshal(data)
|
||||
|
||||
@ -130,7 +130,7 @@ func WithReflection() Option {
|
||||
}
|
||||
|
||||
func recoveryHandler(logger *log.Logger) grpc_recovery.RecoveryHandlerFunc {
|
||||
return func(p interface{}) error {
|
||||
return func(p any) error {
|
||||
if logger != nil {
|
||||
logger.Printf("panic: %v\n%s", p, string(debug.Stack()))
|
||||
}
|
||||
|
||||
@ -78,7 +78,7 @@ func (b *TokenCredentials) authenticate(ctx context.Context) error {
|
||||
// UnaryInterceptor sets the UnaryServerInterceptor for the server and enforces
|
||||
// basic authentication.
|
||||
func (b *TokenCredentials) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
|
||||
if err := b.authenticate(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -63,7 +63,7 @@ func (b *UsernameAndPasswordCredentials) authorize(ctx context.Context) error {
|
||||
// UnaryInterceptor sets the UnaryServerInterceptor for the server and enforces
|
||||
// basic authentication.
|
||||
func (b *UsernameAndPasswordCredentials) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
|
||||
start := time.Now()
|
||||
|
||||
if err := b.authorize(ctx); err != nil {
|
||||
|
||||
@ -27,10 +27,10 @@ type Authorizer struct {
|
||||
FallbackRoles role.Set
|
||||
|
||||
// Logger.
|
||||
Logger func(format string, v ...interface{})
|
||||
Logger func(format string, v ...any)
|
||||
}
|
||||
|
||||
func (a *Authorizer) logf(format string, v ...interface{}) {
|
||||
func (a *Authorizer) logf(format string, v ...any) {
|
||||
if a.Logger != nil {
|
||||
a.Logger(format, v...)
|
||||
}
|
||||
@ -59,7 +59,7 @@ func (a *Authorizer) authorize(ctx context.Context, method string) error {
|
||||
|
||||
// UnaryInterceptor returns grpc UnaryServerInterceptor.
|
||||
func (a *Authorizer) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
|
||||
if err := a.authorize(ctx, info.FullMethod); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -70,7 +70,7 @@ func (a *Authorizer) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
|
||||
// StreamInterceptor returns grpc StreamServerInterceptor.
|
||||
func (a *Authorizer) StreamInterceptor() grpc.StreamServerInterceptor {
|
||||
return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return func(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
if err := a.authorize(stream.Context(), info.FullMethod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -58,10 +58,10 @@ type Injector struct {
|
||||
SideroLinkPeerCheckFunc SideroLinkPeerCheckFunc
|
||||
|
||||
// Logger.
|
||||
Logger func(format string, v ...interface{})
|
||||
Logger func(format string, v ...any)
|
||||
}
|
||||
|
||||
func (i *Injector) logf(format string, v ...interface{}) {
|
||||
func (i *Injector) logf(format string, v ...any) {
|
||||
if i.Logger != nil {
|
||||
i.Logger(format, v...)
|
||||
}
|
||||
@ -149,7 +149,7 @@ func (i *Injector) extractRoles(ctx context.Context) role.Set {
|
||||
|
||||
// UnaryInterceptor returns grpc UnaryServerInterceptor.
|
||||
func (i *Injector) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
|
||||
ctx = ContextWithRoles(ctx, i.extractRoles(ctx))
|
||||
|
||||
return handler(ctx, req)
|
||||
@ -158,7 +158,7 @@ func (i *Injector) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
|
||||
// StreamInterceptor returns grpc StreamServerInterceptor.
|
||||
func (i *Injector) StreamInterceptor() grpc.StreamServerInterceptor {
|
||||
return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return func(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
ctx := stream.Context()
|
||||
ctx = ContextWithRoles(ctx, i.extractRoles(ctx))
|
||||
|
||||
|
||||
@ -23,7 +23,7 @@ func SetMetadata(md metadata.MD, roles role.Set) {
|
||||
}
|
||||
|
||||
// getFromMetadata returns roles extracted from gRPC metadata.
|
||||
func getFromMetadata(ctx context.Context, logf func(format string, v ...interface{})) (role.Set, bool) {
|
||||
func getFromMetadata(ctx context.Context, logf func(format string, v ...any)) (role.Set, bool) {
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
panic("no request metadata")
|
||||
|
||||
@ -57,7 +57,7 @@ func ExtractMetadata(ctx context.Context) string {
|
||||
|
||||
// UnaryInterceptor returns grpc UnaryServerInterceptor.
|
||||
func (m *Middleware) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
|
||||
startTime := time.Now()
|
||||
|
||||
resp, err := handler(ctx, req)
|
||||
@ -78,7 +78,7 @@ func (m *Middleware) UnaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
|
||||
// StreamInterceptor returns grpc StreamServerInterceptor.
|
||||
func (m *Middleware) StreamInterceptor() grpc.StreamServerInterceptor {
|
||||
return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return func(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
startTime := time.Now()
|
||||
|
||||
err := handler(srv, stream)
|
||||
|
||||
@ -142,7 +142,7 @@ func Generate(in *GenerateInput, out io.Writer) error {
|
||||
})
|
||||
}
|
||||
|
||||
func base64Encode(content interface{}) (string, error) {
|
||||
func base64Encode(content any) (string, error) {
|
||||
str, ok := content.(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("argument to base64 encode is not a string: %v", content)
|
||||
|
||||
@ -244,7 +244,7 @@ func (c *Client) Kubeconfig(ctx context.Context) ([]byte, error) {
|
||||
func (c *Client) ApplyConfiguration(ctx context.Context, req *machineapi.ApplyConfigurationRequest, callOptions ...grpc.CallOption) (resp *machineapi.ApplyConfigurationResponse, err error) {
|
||||
resp, err = c.MachineClient.ApplyConfiguration(ctx, req, callOptions...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ApplyConfigurationResponse) //nolint:errcheck
|
||||
|
||||
@ -255,7 +255,7 @@ func (c *Client) ApplyConfiguration(ctx context.Context, req *machineapi.ApplyCo
|
||||
func (c *Client) GenerateConfiguration(ctx context.Context, req *machineapi.GenerateConfigurationRequest, callOptions ...grpc.CallOption) (resp *machineapi.GenerateConfigurationResponse, err error) {
|
||||
resp, err = c.MachineClient.GenerateConfiguration(ctx, req, callOptions...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.GenerateConfigurationResponse) //nolint:errcheck
|
||||
|
||||
@ -266,7 +266,7 @@ func (c *Client) GenerateConfiguration(ctx context.Context, req *machineapi.Gene
|
||||
func (c *Client) Disks(ctx context.Context, callOptions ...grpc.CallOption) (resp *storageapi.DisksResponse, err error) {
|
||||
resp, err = c.StorageClient.Disks(ctx, &emptypb.Empty{}, callOptions...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*storageapi.DisksResponse) //nolint:errcheck
|
||||
|
||||
@ -283,7 +283,7 @@ func (c *Client) Stats(ctx context.Context, namespace string, driver common.Cont
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.StatsResponse) //nolint:errcheck
|
||||
|
||||
@ -301,7 +301,7 @@ func (c *Client) Containers(ctx context.Context, namespace string, driver common
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ContainersResponse) //nolint:errcheck
|
||||
|
||||
@ -468,7 +468,7 @@ func (c *Client) LogsContainers(ctx context.Context, callOptions ...grpc.CallOpt
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.LogsContainersResponse) //nolint:errcheck
|
||||
|
||||
@ -483,7 +483,7 @@ func (c *Client) Version(ctx context.Context, callOptions ...grpc.CallOption) (r
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.VersionResponse) //nolint:errcheck
|
||||
|
||||
@ -498,7 +498,7 @@ func (c *Client) Processes(ctx context.Context, callOptions ...grpc.CallOption)
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ProcessesResponse) //nolint:errcheck
|
||||
|
||||
@ -513,7 +513,7 @@ func (c *Client) Memory(ctx context.Context, callOptions ...grpc.CallOption) (re
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.MemoryResponse) //nolint:errcheck
|
||||
|
||||
@ -528,7 +528,7 @@ func (c *Client) Mounts(ctx context.Context, callOptions ...grpc.CallOption) (re
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.MountsResponse) //nolint:errcheck
|
||||
|
||||
@ -649,7 +649,7 @@ func (c *Client) ServiceList(ctx context.Context, callOptions ...grpc.CallOption
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ServiceListResponse) //nolint:errcheck
|
||||
|
||||
@ -678,7 +678,7 @@ func (c *Client) ServiceInfo(ctx context.Context, id string, callOptions ...grpc
|
||||
return services, err
|
||||
}
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ServiceListResponse) //nolint:errcheck
|
||||
|
||||
@ -710,7 +710,7 @@ func (c *Client) ServiceStart(ctx context.Context, id string, callOptions ...grp
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ServiceStartResponse) //nolint:errcheck
|
||||
|
||||
@ -725,7 +725,7 @@ func (c *Client) ServiceStop(ctx context.Context, id string, callOptions ...grpc
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ServiceStopResponse) //nolint:errcheck
|
||||
|
||||
@ -740,7 +740,7 @@ func (c *Client) ServiceRestart(ctx context.Context, id string, callOptions ...g
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.ServiceRestartResponse) //nolint:errcheck
|
||||
|
||||
@ -755,7 +755,7 @@ func (c *Client) Time(ctx context.Context, callOptions ...grpc.CallOption) (resp
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*timeapi.TimeResponse) //nolint:errcheck
|
||||
|
||||
@ -770,7 +770,7 @@ func (c *Client) TimeCheck(ctx context.Context, server string, callOptions ...gr
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*timeapi.TimeResponse) //nolint:errcheck
|
||||
|
||||
@ -824,7 +824,7 @@ func (c *Client) EtcdLeaveCluster(ctx context.Context, req *machineapi.EtcdLeave
|
||||
func (c *Client) EtcdForfeitLeadership(ctx context.Context, req *machineapi.EtcdForfeitLeadershipRequest, callOptions ...grpc.CallOption) (*machineapi.EtcdForfeitLeadershipResponse, error) {
|
||||
resp, err := c.MachineClient.EtcdForfeitLeadership(ctx, req, callOptions...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.EtcdForfeitLeadershipResponse) //nolint:errcheck
|
||||
|
||||
@ -835,7 +835,7 @@ func (c *Client) EtcdForfeitLeadership(ctx context.Context, req *machineapi.Etcd
|
||||
func (c *Client) EtcdMemberList(ctx context.Context, req *machineapi.EtcdMemberListRequest, callOptions ...grpc.CallOption) (*machineapi.EtcdMemberListResponse, error) {
|
||||
resp, err := c.MachineClient.EtcdMemberList(ctx, req, callOptions...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.EtcdMemberListResponse) //nolint:errcheck
|
||||
|
||||
@ -895,7 +895,7 @@ func (c *Client) EtcdRecover(ctx context.Context, snapshot io.Reader, callOption
|
||||
|
||||
resp, err := cli.CloseAndRecv()
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.EtcdRecoverResponse) //nolint:errcheck
|
||||
|
||||
@ -908,7 +908,7 @@ func (c *Client) EtcdRecover(ctx context.Context, snapshot io.Reader, callOption
|
||||
func (c *Client) EtcdAlarmList(ctx context.Context, opts ...grpc.CallOption) (*machineapi.EtcdAlarmListResponse, error) {
|
||||
resp, err := c.MachineClient.EtcdAlarmList(ctx, &emptypb.Empty{}, opts...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.EtcdAlarmListResponse) //nolint:errcheck
|
||||
|
||||
@ -921,7 +921,7 @@ func (c *Client) EtcdAlarmList(ctx context.Context, opts ...grpc.CallOption) (*m
|
||||
func (c *Client) EtcdAlarmDisarm(ctx context.Context, opts ...grpc.CallOption) (*machineapi.EtcdAlarmDisarmResponse, error) {
|
||||
resp, err := c.MachineClient.EtcdAlarmDisarm(ctx, &emptypb.Empty{}, opts...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.EtcdAlarmDisarmResponse) //nolint:errcheck
|
||||
|
||||
@ -937,7 +937,7 @@ func (c *Client) EtcdAlarmDisarm(ctx context.Context, opts ...grpc.CallOption) (
|
||||
func (c *Client) EtcdDefragment(ctx context.Context, opts ...grpc.CallOption) (*machineapi.EtcdDefragmentResponse, error) {
|
||||
resp, err := c.MachineClient.EtcdDefragment(ctx, &emptypb.Empty{}, opts...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.EtcdDefragmentResponse) //nolint:errcheck
|
||||
|
||||
@ -950,7 +950,7 @@ func (c *Client) EtcdDefragment(ctx context.Context, opts ...grpc.CallOption) (*
|
||||
func (c *Client) EtcdStatus(ctx context.Context, opts ...grpc.CallOption) (*machineapi.EtcdStatusResponse, error) {
|
||||
resp, err := c.MachineClient.EtcdStatus(ctx, &emptypb.Empty{}, opts...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.EtcdStatusResponse) //nolint:errcheck
|
||||
|
||||
@ -961,7 +961,7 @@ func (c *Client) EtcdStatus(ctx context.Context, opts ...grpc.CallOption) (*mach
|
||||
func (c *Client) GenerateClientConfiguration(ctx context.Context, req *machineapi.GenerateClientConfigurationRequest, callOptions ...grpc.CallOption) (resp *machineapi.GenerateClientConfigurationResponse, err error) { //nolint:lll
|
||||
resp, err = c.MachineClient.GenerateClientConfiguration(ctx, req, callOptions...)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.GenerateClientConfigurationResponse) //nolint:errcheck
|
||||
|
||||
@ -1046,7 +1046,7 @@ func (c *Client) Netstat(ctx context.Context, req *machineapi.NetstatRequest, ca
|
||||
callOptions...,
|
||||
)
|
||||
|
||||
var filtered interface{}
|
||||
var filtered any
|
||||
filtered, err = FilterMessages(resp, err)
|
||||
resp, _ = filtered.(*machineapi.NetstatResponse) //nolint:errcheck
|
||||
|
||||
|
||||
@ -215,7 +215,8 @@ func (c *Config) Merge(cfg *Config) []Rename {
|
||||
}
|
||||
|
||||
mappedContexts := map[string]string{}
|
||||
renames := []Rename{}
|
||||
|
||||
var renames []Rename
|
||||
|
||||
for name, ctx := range cfg.Contexts {
|
||||
mergedName := name
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user