feat: add log level and format configuration

Make the log level and format of the Omni application logger configurable via the config file and CLI flags.
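
The settings can be supplied in the config file or through the matching CLI
flags (the schema below maps them via x-cli-flag to --log-level and
--log-format). For example, using the values from the updated examples file:

    logs:
      level: info   # one of: debug, info, warn, error
      format: json  # one of: text, json

or, equivalently, --log-level info --log-format json on the command line.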

Signed-off-by: Utku Ozdemir <utku.ozdemir@siderolabs.com>
Utku Ozdemir 2026-05-05 10:15:14 +02:00
parent 7fb5b164d5
commit 01742e71e6
24 changed files with 175 additions and 68 deletions

View File

@@ -62,28 +62,31 @@ func buildRootCommand() (*cobra.Command, error) {
 				return fmt.Errorf("failed to bind flags: %w", err)
 			}

-			var loggerConfig zap.Config
+			configs := make([]*config.Params, 0, len(configPaths)+1)

-			if constants.IsDebugBuild {
-				loggerConfig = zap.NewDevelopmentConfig()
-				loggerConfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
-			} else {
-				loggerConfig = zap.NewProductionConfig()
-			}
+			for _, configPath := range configPaths {
+				fileConfig, err := config.LoadFromFile(configPath)
+				if err != nil {
+					return fmt.Errorf("failed to load config from file %q: %w", configPath, err)
+				}

-			if !debug {
-				loggerConfig.Level.SetLevel(zap.InfoLevel)
-			} else {
-				loggerConfig.Level.SetLevel(zap.DebugLevel)
-			}
+				configs = append(configs, fileConfig)
+			}

-			logger, err := loggerConfig.Build(
-				zap.AddStacktrace(zapcore.FatalLevel), // only print stack traces for fatal errors
-			)
+			configs = append(configs, flagConfig) // flags have the highest priority
+
+			cfg, err := config.Init(configSchema, configs...)
+			if err != nil {
+				return err
+			}
+
+			logger, err := buildLogger(cfg.Logs, debug)
 			if err != nil {
 				return fmt.Errorf("failed to set up logging: %w", err)
 			}

+			logger.Info("initialized resource compression config", zap.Bool("enabled", cfg.Features.GetEnableConfigDataCompression()))
+
 			signals := make(chan os.Signal, 1)
 			signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
@@ -100,28 +103,9 @@ func buildRootCommand() (*cobra.Command, error) {
 				cancel()
 			}, logger)

-			configs := make([]*config.Params, 0, len(configPaths)+1)
-
-			for _, configPath := range configPaths {
-				var fileConfig *config.Params
-
-				if fileConfig, err = config.LoadFromFile(configPath); err != nil {
-					return fmt.Errorf("failed to load config from file %q: %w", configPath, err)
-				}
-
-				configs = append(configs, fileConfig)
-			}
-
-			configs = append(configs, flagConfig) // flags have the highest priority
-
-			config, err := config.Init(logger, configSchema, configs...)
-			if err != nil {
-				return err
-			}
-
 			ctx = actor.MarkContextAsInternalActor(ctx)

-			state, err := omni.NewState(ctx, config, logger, prometheus.DefaultRegisterer)
+			state, err := omni.NewState(ctx, cfg, logger, prometheus.DefaultRegisterer)
 			if err != nil {
 				return err
 			}
@@ -132,7 +116,7 @@ func buildRootCommand() (*cobra.Command, error) {
 				}
 			}()

-			if err = config.ValidateState(ctx, state.Default()); err != nil {
+			if err = cfg.ValidateState(ctx, state.Default()); err != nil {
 				return err
 			}
@@ -140,7 +124,7 @@ func buildRootCommand() (*cobra.Command, error) {
 				logger.Warn("running debug build")
 			}

-			return app.Run(ctx, state, config, logger)
+			return app.Run(ctx, state, cfg, logger)
 		},
 	}
@@ -149,6 +133,8 @@ func buildRootCommand() (*cobra.Command, error) {
 	rootCmd.Flags().StringArrayVar(&configPaths, "config-path", nil, "config file(s) to load, can be specified multiple times, merged in order (flags have highest priority)")
 	rootCmd.Flags().BoolVar(&debug, "debug", constants.IsDebugBuild, "enable debug logs.")
+
+	rootCmd.Flags().MarkDeprecated("debug", "use --log-level debug") //nolint:errcheck

 	rootCmdFlagBinder.StringVar("account.id", &flagConfig.Account.Id)
 	rootCmdFlagBinder.StringVar("account.name", &flagConfig.Account.Name)
 	rootCmdFlagBinder.StringVar("account.userPilot.appToken", &flagConfig.Account.UserPilot.AppToken)
@@ -279,6 +265,9 @@ func defineAuthFlags(rootCmd *cobra.Command, b *FlagBinder, flagConfig *config.P
 }

 func defineLogsFlags(rootCmd *cobra.Command, b *FlagBinder, flagConfig *config.Params) error {
+	EnumVar(b, "logs.level", &flagConfig.Logs.Level)
+	EnumVar(b, "logs.format", &flagConfig.Logs.Format)
+
 	b.DurationVar("logs.machine.storage.sqliteTimeout", &flagConfig.Logs.Machine.Storage.SqliteTimeout)
 	b.DurationVar("logs.machine.storage.cleanupInterval", &flagConfig.Logs.Machine.Storage.CleanupInterval)
 	b.DurationVar("logs.machine.storage.cleanupOlderThan", &flagConfig.Logs.Machine.Storage.CleanupOlderThan)
@@ -389,3 +378,35 @@ func defineEulaFlags(rootCmd *cobra.Command, b *FlagBinder, flagConfig *config.P
 	rootCmd.MarkFlagsRequiredTogether(b.mustFlagName("eulaAccept.name"), b.mustFlagName("eulaAccept.email"))
 }
+
+func buildLogger(logs config.Logs, debug bool) (*zap.Logger, error) {
+	useTextFormat := constants.IsDebugBuild
+	if logs.Format != nil {
+		useTextFormat = *logs.Format == config.LogsFormatText
+	}
+
+	var loggerConfig zap.Config
+
+	if useTextFormat {
+		loggerConfig = zap.NewDevelopmentConfig()
+		loggerConfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
+	} else {
+		loggerConfig = zap.NewProductionConfig()
+	}
+
+	switch {
+	case logs.Level != nil:
+		var level zapcore.Level
+		if err := level.UnmarshalText([]byte(*logs.Level)); err != nil {
+			return nil, fmt.Errorf("invalid log level %q: %w", *logs.Level, err)
+		}
+
+		loggerConfig.Level.SetLevel(level)
+	case debug:
+		loggerConfig.Level.SetLevel(zap.DebugLevel)
+	default:
+		loggerConfig.Level.SetLevel(zap.InfoLevel)
+	}
+
+	return loggerConfig.Build(zap.AddStacktrace(zapcore.FatalLevel)) // only print stack traces for fatal errors
+}
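
A minimal sketch (not part of the commit, buildLogger is unexported in package
main) of the precedence buildLogger implements: an explicit logs.level wins
over the deprecated --debug flag, which in turn wins over the info default.

    level := config.LogsLevelWarn

    logger, err := buildLogger(config.Logs{Level: &level}, true) // --debug set, but the explicit level wins
    if err != nil {
        panic(err)
    }

    logger.Debug("dropped: below the configured warn level")
    logger.Warn("kept: at or above the configured level")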

View File

@@ -64,7 +64,7 @@ func Run(ctx context.Context, state *omni.State, cfg *config.Params, logger *zap
 	logger.Debug("using config", zap.Any("config", cfg))

-	klog.SetLogger(zapr.NewLogger(logger.WithOptions(zap.IncreaseLevel(zapcore.WarnLevel)).With(logging.Component("kubernetes"))))
+	klog.SetLogger(zapr.NewLogger(logging.IncreaseLevel(logger, zapcore.WarnLevel).With(logging.Component("kubernetes"))))

 	ctx = actor.MarkContextAsInternalActor(ctx)

View File

@@ -71,6 +71,9 @@ examples:
   initialUsers:
     - initial-user-1@example.com
    - initial-user-2@example.com
+  logs:
+    level: info
+    format: json
 comments:
   account:

View File

@@ -347,6 +347,13 @@ config:
     # @ignored
     # Logs contains logging-related configuration.
     logs:
+      # Level is the log level for the Omni application logger. Defaults to info, unless the --debug flag is set, in
+      # which case it defaults to debug.
+      #level: info
+      # Format is the log format for the Omni application logger. text uses a human-readable colored format
+      # (development), json uses structured JSON output (production). Defaults to text for debug builds, json
+      # otherwise.
+      #format: json
       # Machine contains machine logs configuration.
       machine:
         # Storage contains configuration for machine logs storage.

View File

@@ -40,6 +40,7 @@ import (
 	managementclient "github.com/siderolabs/omni/client/pkg/client/management"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	grpcomni "github.com/siderolabs/omni/internal/backend/grpc"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	"github.com/siderolabs/omni/internal/backend/runtime/kubernetes"
 	omniruntime "github.com/siderolabs/omni/internal/backend/runtime/omni"
 	omnictrl "github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/omni"
@@ -64,7 +65,7 @@ func TestGenerateConfigs(t *testing.T) {
 	rt, err := omniruntime.NewRuntime(config.Default(), nil, nil, nil,
 		nil, nil, nil, nil, nil, st, prometheus.NewRegistry(),
-		nil, kubernetesRuntime, nil, logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)))
+		nil, kubernetesRuntime, nil, logging.IncreaseLevel(logger, zap.InfoLevel))
 	require.NoError(t, err)

 	clusterName := "cluster1"

View File

@@ -103,7 +103,7 @@ func (suite *GrpcSuite) SetupTest() {
 	suite.runtime, err = omniruntime.NewRuntime(
 		config.Default(), clientFactory, dnsService, workloadProxyReconciler, nil,
 		imageFactoryClient, nil, nil, nil, st,
-		prometheus.NewRegistry(), discoveryClientCache, kubernetesRuntime, nil, logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)),
+		prometheus.NewRegistry(), discoveryClientCache, kubernetesRuntime, nil, logging.IncreaseLevel(logger, zap.InfoLevel),
 	)
 	suite.Require().NoError(err)

View File

@@ -8,9 +8,22 @@ package logging

 import (
 	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
 )

 // Component returns the well-known "component" zap field.
 func Component(name string) zap.Field {
 	return zap.String("component", name)
 }
+
+// IncreaseLevel raises the logger's minimum level to lvl.
+//
+// Unlike zap.IncreaseLevel, it is a no-op when the underlying core is already
+// at or above lvl, instead of failing and printing an error to stderr.
+func IncreaseLevel(logger *zap.Logger, lvl zapcore.Level) *zap.Logger {
+	if logger.Level() >= lvl {
+		return logger
+	}
+
+	return logger.WithOptions(zap.IncreaseLevel(lvl))
+}
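
A usage sketch: unlike wrapping with zap.IncreaseLevel directly, the helper can
be asked for a level below the logger's current one without zap printing an
error to stderr; it simply returns the logger unchanged.

    logger, _ := zap.NewProduction() // core enabled at info and above

    lb := logging.IncreaseLevel(logger, zap.ErrorLevel) // raised to error
    lb = logging.IncreaseLevel(lb, zap.WarnLevel)       // already above warn: returned as-is

    lb.Error("logged")
    lb.Warn("suppressed: the core stays at error")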

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/siderolabs/go-loadbalancer/upstream"
 	"go.uber.org/zap"

+	"github.com/siderolabs/omni/internal/backend/logging"
 	"github.com/siderolabs/omni/internal/pkg/config"
 )
@@ -26,10 +27,12 @@ type NewFunc func(bindAddress string, bindPort int, logger *zap.Logger, lbConfig

 // DefaultNew returns a new load balancer with default settings.
 func DefaultNew(bindAddress string, bindPort int, logger *zap.Logger, lbConfig config.LoadBalancerService) (LoadBalancer, error) { //nolint:ireturn
+	logger.Level()
+
 	return controlplane.NewLoadBalancer(
 		bindAddress,
 		bindPort,
-		logger.WithOptions(zap.IncreaseLevel(zap.ErrorLevel)), // silence the load balancer logs
+		logging.IncreaseLevel(logger, zap.ErrorLevel), // silence the load balancer logs
 		controlplane.WithDialTimeout(lbConfig.GetDialTimeout()),
 		controlplane.WithKeepAlivePeriod(lbConfig.GetKeepAlivePeriod()),
 		controlplane.WithTCPUserTimeout(lbConfig.GetTcpUserTimeout()),

View File

@@ -47,6 +47,7 @@ import (
 	"github.com/siderolabs/omni/client/api/omni/specs"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	"github.com/siderolabs/omni/internal/backend/runtime/kubernetes"
 	omniruntime "github.com/siderolabs/omni/internal/backend/runtime/omni"
 	"github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/testutils"
@@ -366,7 +367,7 @@ func (suite *OmniSuite) SetupTest() {
 	logger := zaptest.NewLogger(suite.T())

-	suite.runtime, err = runtime.NewRuntime(suite.state, logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)), omniruntime.RuntimeCacheOptions()...)
+	suite.runtime, err = runtime.NewRuntime(suite.state, logging.IncreaseLevel(logger, zap.InfoLevel), omniruntime.RuntimeCacheOptions()...)
 	suite.Require().NoError(err)

 	suite.cachedState = state.WrapCore(suite.runtime.CachedState())

View File

@@ -28,6 +28,7 @@ import (
 	authres "github.com/siderolabs/omni/client/pkg/omni/resources/auth"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	omnictrl "github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/omni"
 	"github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/omni/clustermachine"
 	"github.com/siderolabs/omni/internal/backend/runtime/omni/migration"
@@ -46,7 +47,7 @@ func (suite *MigrationSuite) SetupTest() {
 	suite.logger = zaptest.NewLogger(suite.T())

-	suite.manager = migration.NewManager(suite.state, suite.logger.WithOptions(zap.IncreaseLevel(zapcore.WarnLevel)))
+	suite.manager = migration.NewManager(suite.state, logging.IncreaseLevel(suite.logger, zapcore.WarnLevel))
 }

 func (suite *MigrationSuite) TestMoveClusterTaintFromResourceToLabel() {

View File

@@ -27,6 +27,7 @@ import (
 	"github.com/siderolabs/omni/client/api/omni/specs"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	"github.com/siderolabs/omni/internal/backend/dns"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	"github.com/siderolabs/omni/internal/backend/runtime"
 	"github.com/siderolabs/omni/internal/backend/runtime/kubernetes"
 	omniruntime "github.com/siderolabs/omni/internal/backend/runtime/omni"
@@ -95,7 +96,7 @@ func (suite *OmniRuntimeSuite) SetupTest() {
 	suite.runtime, err = omniruntime.NewRuntime(config.Default(), clientFactory, dnsService, workloadProxyReconciler, nil,
 		nil, nil, nil, nil, mockState, prometheus.NewRegistry(),
-		discoveryClientCache, kubernetesRuntime, nil, logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)))
+		discoveryClientCache, kubernetesRuntime, nil, logging.IncreaseLevel(logger, zap.InfoLevel))
 	suite.Require().NoError(err)

View File

@@ -167,10 +167,8 @@ func getEtcdState(params *config.EtcdParams, logger *zap.Logger) (EtcdState, err

 // getEmbeddedEtcdState runs the embedded etcd and creates a client for it.
 func getEmbeddedEtcdState(params *config.EtcdParams, logger *zap.Logger) (EtcdState, error) {
-	logger = logger.WithOptions(
-		// never enable debug logs for etcd, they are too chatty
-		zap.IncreaseLevel(zap.InfoLevel),
-	).With(logging.Component("embedded_etcd"))
+	// never enable debug logs for etcd, they are too chatty
+	logger = logging.IncreaseLevel(logger, zap.InfoLevel).With(logging.Component("embedded_etcd"))

 	embeddedDBPath := params.GetEmbeddedDBPath()
 	logger.Info("starting embedded etcd server", zap.String("data_dir", embeddedDBPath))
@@ -248,10 +246,8 @@ func getEmbeddedEtcdState(params *config.EtcdParams, logger *zap.Logger) (EtcdSt
 			grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(constants.GRPCMaxMessageSize)),
 			grpc.WithSharedWriteBuffer(true),
 		},
-		Logger: logger.WithOptions(
-			// never enable debug logs for etcd client, they are too chatty
-			zap.IncreaseLevel(zap.InfoLevel),
-		).With(logging.Component("etcd_client")),
+		// never enable debug logs for etcd client, they are too chatty
+		Logger: logging.IncreaseLevel(logger, zap.InfoLevel).With(logging.Component("etcd_client")),
 	})
 	if err != nil {
 		embeddedServer.Close()
@@ -306,10 +302,8 @@ func getExternalEtcdState(params *config.EtcdParams, logger *zap.Logger) (EtcdSt
 			grpc.WithSharedWriteBuffer(true),
 		},
 		TLS: tlsConfig,
-		Logger: logger.WithOptions(
-			// never enable debug logs for etcd client, they are too chatty
-			zap.IncreaseLevel(zap.InfoLevel),
-		).With(logging.Component("etcd_client")),
+		// never enable debug logs for etcd client, they are too chatty
+		Logger: logging.IncreaseLevel(logger, zap.InfoLevel).With(logging.Component("etcd_client")),
 	})
 	if err != nil {
 		return nil, err

View File

@@ -23,6 +23,7 @@ import (
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	"github.com/siderolabs/omni/internal/backend/dns"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	"github.com/siderolabs/omni/internal/backend/runtime/kubernetes"
 	omniruntime "github.com/siderolabs/omni/internal/backend/runtime/omni"
 	"github.com/siderolabs/omni/internal/backend/runtime/talos"
@@ -46,7 +47,7 @@ func TestOperatorTalosconfig(t *testing.T) {
 	kubernetesRuntime := kubernetes.New(st.Default(), logger, "", "", "")

 	r, err := omniruntime.NewRuntime(omniconfig.Default(), clientFactory, dnsService, workloadProxyReconciler, nil, nil, nil, nil, nil,
-		st, prometheus.NewRegistry(), discoveryClientCache, kubernetesRuntime, nil, logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)))
+		st, prometheus.NewRegistry(), discoveryClientCache, kubernetesRuntime, nil, logging.IncreaseLevel(logger, zap.InfoLevel))
 	require.NoError(t, err)

View File

@@ -1223,7 +1223,7 @@ func runEmbeddedDiscoveryService(ctx context.Context, secondaryStorageDB *sqlite
 		// Nodes connect over SideroLink, so the peer IP is a tunnel address, not a real public IP.
 		// Reporting it back would cause Talos to add it as a broken KubeSpan endpoint candidate.
 		DisableClientIPReporting: true,
-	}, logger.WithOptions(zap.IncreaseLevel(logLevel)).With(logging.Component("discovery_service")))
+	}, logging.IncreaseLevel(logger, logLevel).With(logging.Component("discovery_service")))
 	if errors.Is(err, syscall.EADDRNOTAVAIL) {
 		return retry.ExpectedError(err)

View File

@@ -24,6 +24,7 @@ import (
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"

+	"github.com/siderolabs/omni/internal/backend/logging"
 	"github.com/siderolabs/omni/internal/backend/workloadproxy/lb"
 )
@@ -61,7 +62,7 @@ func NewReconciler(logger *zap.Logger, logLevel zapcore.Level, lazyStopAfter tim
 		clusterToAliasToLB: map[resource.ID]map[string]loadBalancer{},
 		aliasToCluster:     map[string]resource.ID{},
 		logger:             logger,
-		lbLogger:           logger.WithOptions(zap.IncreaseLevel(zapcore.ErrorLevel)),
+		lbLogger:           logging.IncreaseLevel(logger, zapcore.ErrorLevel),
 		logLevel:           logLevel,
 		proxyDialer: &net.Dialer{
 			Timeout: 30 * time.Second,

View File

@@ -477,7 +477,7 @@ func runOmni(t *testing.T) (string, error) {
 		return "", fmt.Errorf("failed to parse config schema: %w", err)
 	}

-	config, err := config.Init(logger, configSchema, params)
+	config, err := config.Init(configSchema, params)
 	if err != nil {
 		return "", err
 	}

View File

@@ -776,6 +776,28 @@ func (s *LocalResourceService) SetPort(v int) {
 	s.Port = &v
 }

+func (s *Logs) GetFormat() LogsFormat {
+	if s == nil || s.Format == nil {
+		return *new(LogsFormat)
+	}
+
+	return *s.Format
+}
+
+func (s *Logs) SetFormat(v LogsFormat) {
+	s.Format = &v
+}
+
+func (s *Logs) GetLevel() LogsLevel {
+	if s == nil || s.Level == nil {
+		return *new(LogsLevel)
+	}
+
+	return *s.Level
+}
+
+func (s *Logs) SetLevel(v LogsLevel) {
+	s.Level = &v
+}
+
 func (s *LogsAudit) GetCleanupProbability() float64 {
 	if s == nil || s.CleanupProbability == nil {
 		return *new(float64)

View File

@@ -21,7 +21,6 @@ import (
 	"github.com/cosi-project/runtime/pkg/state"
 	"github.com/siderolabs/gen/xyaml"
 	"github.com/siderolabs/talos/pkg/machinery/config/merge"
-	"go.uber.org/zap"
 	"go.yaml.in/yaml/v4"

 	"github.com/siderolabs/omni/client/pkg/compression"
@@ -110,7 +109,7 @@ func Default() *Params {
 }

 // Init the config using defaults, merge with overrides, populate fallbacks and validate.
-func Init(logger *zap.Logger, schema *jsonschema.Schema, params ...*Params) (*Params, error) {
+func Init(schema *jsonschema.Schema, params ...*Params) (*Params, error) {
 	config, err := LoadDefault()
 	if err != nil {
 		return nil, err
@@ -133,8 +132,6 @@ func Init(logger *zap.Logger, schema *jsonschema.Schema, params ...*Pa
 		return nil, err
 	}

-	logger.Info("initialized resource compression config", zap.Bool("enabled", enableCompression))
-
 	return config, nil
 }

View File

@@ -21,7 +21,6 @@ import (
 	"github.com/santhosh-tekuri/jsonschema/v6"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"go.uber.org/zap/zaptest"

 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	"github.com/siderolabs/omni/internal/pkg/config"
@@ -60,7 +59,7 @@ func TestMergeConfig(t *testing.T) {
 	configSchema, parseErr := config.ParseSchema()
 	require.NoError(t, parseErr)

-	cfg, err := config.Init(zaptest.NewLogger(t), configSchema, params)
+	cfg, err := config.Init(configSchema, params)
 	require.NoError(t, err)

 	assert.True(t, cfg.Services.EmbeddedDiscoveryService.GetEnabled())

View File

@@ -1053,6 +1053,24 @@
 				"stripe"
 			],
 			"properties": {
+				"level": {
+					"description": "Level is the log level for the Omni application logger. Defaults to info, unless the --debug flag is set, in which case it defaults to debug.",
+					"x-cli-flag": "log-level",
+					"type": "string",
+					"enum": ["debug", "info", "warn", "error"],
+					"goJSONSchema": {
+						"pointer": true
+					}
+				},
+				"format": {
+					"description": "Format is the log format for the Omni application logger. text uses a human-readable colored format (development), json uses structured JSON output (production). Defaults to text for debug builds, json otherwise.",
+					"x-cli-flag": "log-format",
+					"type": "string",
+					"enum": ["text", "json"],
+					"goJSONSchema": {
+						"pointer": true
+					}
+				},
 				"machine": {
 					"description": "Machine contains machine logs configuration.",
 					"$ref": "#/definitions/LogsMachine"

View File

@@ -361,6 +361,15 @@ type Logs struct {
 	// Audit contains audit logs configuration.
 	Audit LogsAudit `json:"audit" yaml:"audit"`

+	// Format is the log format for the Omni application logger. text uses a
+	// human-readable colored format (development), json uses structured JSON output
+	// (production). Defaults to text for debug builds, json otherwise.
+	Format *LogsFormat `json:"format,omitempty,omitzero" yaml:"format,omitempty"`
+
+	// Level is the log level for the Omni application logger. Defaults to info,
+	// unless the --debug flag is set, in which case it defaults to debug.
+	Level *LogsLevel `json:"level,omitempty,omitzero" yaml:"level,omitempty"`
+
 	// Machine contains machine logs configuration.
 	Machine LogsMachine `json:"machine" yaml:"machine"`
@@ -395,6 +404,18 @@ type LogsAudit struct {
 	SqliteTimeout *time.Duration `json:"sqliteTimeout,omitempty,omitzero" yaml:"sqliteTimeout,omitempty"`
 }

+type LogsFormat string
+
+const LogsFormatJson LogsFormat = "json"
+const LogsFormatText LogsFormat = "text"
+
+type LogsLevel string
+
+const LogsLevelDebug LogsLevel = "debug"
+const LogsLevelError LogsLevel = "error"
+const LogsLevelInfo LogsLevel = "info"
+const LogsLevelWarn LogsLevel = "warn"
+
 type LogsMachine struct {
 	// Storage contains configuration for machine logs storage.
 	Storage LogsMachineStorage `json:"storage" yaml:"storage"`
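
The generated accessors are nil-safe and follow the pointer-enum pattern: a nil
field means "not set", which is how buildLogger distinguishes an explicit
setting from the default. A small sketch (illustrative caller, not in the commit):

    var logs config.Logs // zero value: Level and Format are nil, i.e. unset

    _ = logs.GetLevel() // safe on nil fields: returns the zero LogsLevel ("")

    logs.SetLevel(config.LogsLevelDebug)

    if logs.Level != nil { // explicit-presence check, as buildLogger does
        fmt.Println(*logs.Level) // "debug"
    }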

View File

@@ -42,6 +42,7 @@ import (
 	"github.com/siderolabs/omni/client/pkg/constants"
 	"github.com/siderolabs/omni/client/pkg/jointoken"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	"github.com/siderolabs/omni/internal/pkg/config"
 	"github.com/siderolabs/omni/internal/pkg/errgroup"
 	"github.com/siderolabs/omni/internal/pkg/grpcutil"
@@ -245,7 +246,7 @@ func createListener(ctx context.Context, host, port string) (net.Listener, error
 func (manager *Manager) Register(server *grpc.Server) {
 	pb.RegisterProvisionServiceServer(server, manager.provisionServer)
 	pb.RegisterWireGuardOverGRPCServiceServer(server,
-		wggrpc.NewService(manager.peerTraffic, manager.allowedPeers, manager.logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel))))
+		wggrpc.NewService(manager.peerTraffic, manager.allowedPeers, logging.IncreaseLevel(manager.logger, zap.InfoLevel)))
 }

 // Run implements controller.Manager interface.
@@ -487,7 +488,7 @@ func (manager *Manager) startWireguard(ctx context.Context, eg *errgroup.Group,
 			}
 		}()

-		return manager.wgHandler.Run(ctx, manager.logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)))
+		return manager.wgHandler.Run(ctx, logging.IncreaseLevel(manager.logger, zap.InfoLevel))
 	})

 	return nil

View File

@@ -35,6 +35,7 @@ import (
 	"github.com/siderolabs/omni/client/pkg/omni/resources/infra"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	siderolinkres "github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	omniruntime "github.com/siderolabs/omni/internal/backend/runtime/omni"
 	omnictrl "github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/omni"
 	"github.com/siderolabs/omni/internal/pkg/config"
@@ -91,7 +92,7 @@ func TestProvision(t *testing.T) {
 	ctx, cancel := context.WithCancel(ctx)

-	runtime, err := runtime.NewRuntime(state, logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)), omniruntime.RuntimeCacheOptions()...)
+	runtime, err := runtime.NewRuntime(state, logging.IncreaseLevel(logger, zap.InfoLevel), omniruntime.RuntimeCacheOptions()...)
 	require.NoError(t, err)

 	peers := siderolink.NewPeersPool(logger, &fakeWireguardHandler{peers: map[string]wgtypes.Peer{}})

View File

@@ -40,6 +40,7 @@ import (
 	"github.com/siderolabs/omni/client/pkg/jointoken"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
 	"github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
+	"github.com/siderolabs/omni/internal/backend/logging"
 	omniruntime "github.com/siderolabs/omni/internal/backend/runtime/omni"
 	omnictrl "github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/omni"
 	"github.com/siderolabs/omni/internal/pkg/config"
@@ -146,7 +147,7 @@ func (suite *SiderolinkSuite) SetupTest() {
 	logger := zaptest.NewLogger(suite.T())

-	suite.runtime, err = runtime.NewRuntime(suite.state, logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)), omniruntime.RuntimeCacheOptions()...)
+	suite.runtime, err = runtime.NewRuntime(suite.state, logging.IncreaseLevel(logger, zap.InfoLevel), omniruntime.RuntimeCacheOptions()...)
 	suite.Require().NoError(err)

 	suite.wg.Add(1)